ai 5.0.0-canary.1 → 5.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +136 -0
  2. package/dist/index.d.mts +1449 -718
  3. package/dist/index.d.ts +1449 -718
  4. package/dist/index.js +2550 -760
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +2423 -670
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +730 -0
  9. package/dist/internal/index.d.ts +730 -0
  10. package/dist/internal/index.js +1482 -0
  11. package/dist/internal/index.js.map +1 -0
  12. package/{rsc/dist/rsc-server.mjs → dist/internal/index.mjs} +855 -1555
  13. package/dist/internal/index.mjs.map +1 -0
  14. package/{mcp-stdio/dist → dist/mcp-stdio}/index.js +1 -1
  15. package/dist/mcp-stdio/index.js.map +1 -0
  16. package/{mcp-stdio/dist → dist/mcp-stdio}/index.mjs +1 -1
  17. package/dist/mcp-stdio/index.mjs.map +1 -0
  18. package/{test/dist → dist/test}/index.d.mts +18 -16
  19. package/{test/dist → dist/test}/index.d.ts +18 -16
  20. package/{test/dist → dist/test}/index.js +28 -8
  21. package/dist/test/index.js.map +1 -0
  22. package/{test/dist → dist/test}/index.mjs +27 -7
  23. package/dist/test/index.mjs.map +1 -0
  24. package/package.json +28 -47
  25. package/mcp-stdio/create-child-process.test.ts +0 -92
  26. package/mcp-stdio/create-child-process.ts +0 -21
  27. package/mcp-stdio/dist/index.js.map +0 -1
  28. package/mcp-stdio/dist/index.mjs.map +0 -1
  29. package/mcp-stdio/get-environment.ts +0 -43
  30. package/mcp-stdio/index.ts +0 -4
  31. package/mcp-stdio/mcp-stdio-transport.test.ts +0 -262
  32. package/mcp-stdio/mcp-stdio-transport.ts +0 -157
  33. package/rsc/dist/index.d.ts +0 -813
  34. package/rsc/dist/index.mjs +0 -18
  35. package/rsc/dist/rsc-client.d.mts +0 -1
  36. package/rsc/dist/rsc-client.mjs +0 -18
  37. package/rsc/dist/rsc-client.mjs.map +0 -1
  38. package/rsc/dist/rsc-server.d.mts +0 -748
  39. package/rsc/dist/rsc-server.mjs.map +0 -1
  40. package/rsc/dist/rsc-shared.d.mts +0 -101
  41. package/rsc/dist/rsc-shared.mjs +0 -308
  42. package/rsc/dist/rsc-shared.mjs.map +0 -1
  43. package/test/dist/index.js.map +0 -1
  44. package/test/dist/index.mjs.map +0 -1
  45. package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.mts +6 -6
  46. package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.ts +6 -6
package/dist/index.d.ts CHANGED
@@ -1,14 +1,47 @@
- import { IDGenerator } from '@ai-sdk/provider-utils';
- export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
- export { Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, formatDataStreamPart, jsonSchema, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
- import { LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2ProviderMetadata, LanguageModelV2CallOptions, AISDKError, LanguageModelV2FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
- export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Prompt, LanguageModelV2StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
+ import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
+ export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
+ import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { z } from 'zod';
+ import { JSONSchema7 } from 'json-schema';
  import { ServerResponse as ServerResponse$1 } from 'http';
 
+ /**
+ Embedding model that is used by the AI SDK Core functions.
+ */
+ type EmbeddingModel<VALUE> = EmbeddingModelV2<VALUE>;
+ /**
+ Embedding.
+ */
+ type Embedding = EmbeddingModelV2Embedding;
+
+ /**
+ Image model that is used by the AI SDK Core functions.
+ */
+ type ImageModel = ImageModelV1;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type ImageGenerationWarning = ImageModelV1CallWarning;
+
+ type ImageModelResponseMetadata = {
+ /**
+ Timestamp for the start of the generated response.
+ */
+ timestamp: Date;
+ /**
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+
  /**
  Language model that is used by the AI SDK Core functions.
  */
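A minimal sketch of how the reworked entry point above might be consumed (assuming the `ai` package continues to re-export `generateId` and the new `EmbeddingModel` alias shown in this hunk; the model variable is hypothetical):

  import { generateId, type EmbeddingModel } from 'ai';

  // EmbeddingModel is now an alias for EmbeddingModelV2, so any V2 embedding
  // model instance from a provider package should be assignable to it.
  declare const textEmbeddingModel: EmbeddingModel<string>;

  const requestId = generateId(); // id helper re-exported from @ai-sdk/provider-utils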
@@ -50,132 +83,14 @@ Tool choice for the generation. It supports the following settings:
50
83
  */
51
84
  type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
52
85
  type: 'tool';
53
- toolName: keyof TOOLS;
54
- };
55
- /**
56
- * @deprecated Use `ToolChoice` instead.
57
- */
58
- type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
59
-
60
- interface DataStreamWriter {
61
- /**
62
- * Appends a data part to the stream.
63
- */
64
- write(data: DataStreamString): void;
65
- /**
66
- * Appends a data part to the stream.
67
- */
68
- writeData(value: JSONValue): void;
69
- /**
70
- * Appends a message annotation to the stream.
71
- */
72
- writeMessageAnnotation(value: JSONValue): void;
73
- /**
74
- * Appends a source part to the stream.
75
- */
76
- writeSource(source: Source): void;
77
- /**
78
- * Merges the contents of another stream to this stream.
79
- */
80
- merge(stream: ReadableStream<DataStreamString>): void;
81
- /**
82
- * Error handler that is used by the data stream writer.
83
- * This is intended for forwarding when merging streams
84
- * to prevent duplicated error masking.
85
- */
86
- onError: ((error: unknown) => string) | undefined;
87
- }
88
-
89
- declare function createDataStream({ execute, onError, }: {
90
- execute: (dataStream: DataStreamWriter) => Promise<void> | void;
91
- onError?: (error: unknown) => string;
92
- }): ReadableStream<DataStreamString>;
93
-
94
- declare function createDataStreamResponse({ status, statusText, headers, execute, onError, }: ResponseInit & {
95
- execute: (dataStream: DataStreamWriter) => Promise<void> | void;
96
- onError?: (error: unknown) => string;
97
- }): Response;
98
-
99
- declare function pipeDataStreamToResponse(response: ServerResponse, { status, statusText, headers, execute, onError, }: ResponseInit & {
100
- execute: (writer: DataStreamWriter) => Promise<void> | void;
101
- onError?: (error: unknown) => string;
102
- }): void;
103
-
104
- /**
105
- * Telemetry configuration.
106
- */
107
- type TelemetrySettings = {
108
- /**
109
- * Enable or disable telemetry. Disabled by default while experimental.
110
- */
111
- isEnabled?: boolean;
112
- /**
113
- * Enable or disable input recording. Enabled by default.
114
- *
115
- * You might want to disable input recording to avoid recording sensitive
116
- * information, to reduce data transfers, or to increase performance.
117
- */
118
- recordInputs?: boolean;
119
- /**
120
- * Enable or disable output recording. Enabled by default.
121
- *
122
- * You might want to disable output recording to avoid recording sensitive
123
- * information, to reduce data transfers, or to increase performance.
124
- */
125
- recordOutputs?: boolean;
126
- /**
127
- * Identifier for this function. Used to group telemetry data by function.
128
- */
129
- functionId?: string;
130
- /**
131
- * Additional information to include in the telemetry data.
132
- */
133
- metadata?: Record<string, AttributeValue>;
134
- /**
135
- * A custom tracer to use for the telemetry data.
136
- */
137
- tracer?: Tracer;
138
- };
139
-
140
- /**
141
- Embedding model that is used by the AI SDK Core functions.
142
- */
143
- type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
144
- /**
145
- Embedding.
146
- */
147
- type Embedding = EmbeddingModelV1Embedding;
148
-
149
- /**
150
- Image model that is used by the AI SDK Core functions.
151
- */
152
- type ImageModel = ImageModelV1;
153
- /**
154
- Warning from the model provider for this call. The call will proceed, but e.g.
155
- some settings might not be supported, which can lead to suboptimal results.
156
- */
157
- type ImageGenerationWarning = ImageModelV1CallWarning;
158
-
159
- type ImageModelResponseMetadata = {
160
- /**
161
- Timestamp for the start of the generated response.
162
- */
163
- timestamp: Date;
164
- /**
165
- The ID of the response model that was used to generate the response.
166
- */
167
- modelId: string;
168
- /**
169
- Response headers.
170
- */
171
- headers?: Record<string, string>;
86
+ toolName: Extract<keyof TOOLS, string>;
172
87
  };
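Given the `toolName` narrowing above, forcing a specific tool might look like this sketch (assuming `ToolChoice` stays exported from the entry point; the `weather` tool name is illustrative):

  import type { ToolChoice } from 'ai';

  // toolName must now be a string key of the tools record (symbol/number keys are excluded).
  const choice: ToolChoice<{ weather: unknown }> = {
    type: 'tool',
    toolName: 'weather',
  };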
173
88
 
174
89
  type LanguageModelRequestMetadata = {
175
90
  /**
176
- Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
91
+ Request HTTP body that was sent to the provider API.
177
92
  */
178
- body?: string;
93
+ body?: unknown;
179
94
  };
180
95
 
181
96
  type LanguageModelResponseMetadata = {
@@ -240,40 +155,891 @@ Additional provider-specific metadata that is returned from the provider.
240
155
  This is needed to enable provider-specific functionality that can be
241
156
  fully encapsulated in the provider.
242
157
  */
243
- type ProviderMetadata = LanguageModelV2ProviderMetadata;
158
+ type ProviderMetadata = SharedV2ProviderMetadata;
244
159
  /**
245
160
  Additional provider-specific options.
246
161
 
247
- They are passed through to the provider from the AI SDK and enable
248
- provider-specific functionality that can be fully encapsulated in the provider.
249
- */
250
- type ProviderOptions = LanguageModelV2ProviderMetadata;
162
+ They are passed through to the provider from the AI SDK and enable
163
+ provider-specific functionality that can be fully encapsulated in the provider.
164
+ */
165
+ type ProviderOptions = SharedV2ProviderOptions;
166
+
167
+ /**
168
+ Represents the number of tokens used in a prompt and completion.
169
+ */
170
+ type LanguageModelUsage$1 = {
171
+ /**
172
+ The number of tokens used in the prompt.
173
+ */
174
+ promptTokens: number;
175
+ /**
176
+ The number of tokens used in the completion.
177
+ */
178
+ completionTokens: number;
179
+ /**
180
+ The total number of tokens used (promptTokens + completionTokens).
181
+ */
182
+ totalTokens: number;
183
+ };
184
+ /**
185
+ Represents the number of tokens used in an embedding.
186
+ */
187
+ type EmbeddingModelUsage = {
188
+ /**
189
+ The number of tokens used in the embedding.
190
+ */
191
+ tokens: number;
192
+ };
193
+
194
+ /**
195
+ Represents the number of tokens used in a prompt and completion.
196
+ */
197
+ type LanguageModelUsage = {
198
+ /**
199
+ The number of tokens used in the prompt.
200
+ */
201
+ promptTokens: number;
202
+ /**
203
+ The number of tokens used in the completion.
204
+ */
205
+ completionTokens: number;
206
+ /**
207
+ The total number of tokens used (promptTokens + completionTokens).
208
+ */
209
+ totalTokens: number;
210
+ };
211
+
212
+ type IdGenerator = () => string;
213
+ /**
214
+ Tool invocations are either tool calls or tool results. For each assistant tool call,
215
+ there is one tool invocation. While the call is in progress, the invocation is a tool call.
216
+ Once the call is complete, the invocation is a tool result.
217
+
218
+ The step is used to track how to map an assistant UI message with many tool invocations
219
+ back to a sequence of LLM assistant/tool result message pairs.
220
+ It is optional for backwards compatibility.
221
+ */
222
+ type ToolInvocation = ({
223
+ state: 'partial-call';
224
+ step?: number;
225
+ } & ToolCall<string, any>) | ({
226
+ state: 'call';
227
+ step?: number;
228
+ } & ToolCall<string, any>) | ({
229
+ state: 'result';
230
+ step?: number;
231
+ } & ToolResult<string, any, any>);
232
+ /**
233
+ * An attachment that can be sent along with a message.
234
+ */
235
+ interface Attachment {
236
+ /**
237
+ * The name of the attachment, usually the file name.
238
+ */
239
+ name?: string;
240
+ /**
241
+ * A string indicating the [media type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
242
+ * By default, it's extracted from the pathname's extension.
243
+ */
244
+ contentType?: string;
245
+ /**
246
+ * The URL of the attachment. It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
247
+ */
248
+ url: string;
249
+ }
250
+ /**
251
+ * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
252
+ */
253
+ interface Message {
254
+ /**
255
+ A unique identifier for the message.
256
+ */
257
+ id: string;
258
+ /**
259
+ The timestamp of the message.
260
+ */
261
+ createdAt?: Date;
262
+ /**
263
+ Text content of the message. Use parts when possible.
264
+ */
265
+ content: string;
266
+ /**
267
+ Reasoning for the message.
268
+
269
+ @deprecated Use `parts` instead.
270
+ */
271
+ reasoning?: string;
272
+ /**
273
+ * Additional attachments to be sent along with the message.
274
+ */
275
+ experimental_attachments?: Attachment[];
276
+ /**
277
+ The 'data' role is deprecated.
278
+ */
279
+ role: 'system' | 'user' | 'assistant' | 'data';
280
+ /**
281
+ For data messages.
282
+
283
+ @deprecated Data messages will be removed.
284
+ */
285
+ data?: JSONValue;
286
+ /**
287
+ * Additional message-specific information added on the server via StreamData
288
+ */
289
+ annotations?: JSONValue[] | undefined;
290
+ /**
291
+ Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
292
+ that the assistant made as part of this message.
293
+
294
+ @deprecated Use `parts` instead.
295
+ */
296
+ toolInvocations?: Array<ToolInvocation>;
297
+ /**
298
+ * The parts of the message. Use this for rendering the message in the UI.
299
+ *
300
+ * Assistant messages can have text, reasoning and tool invocation parts.
301
+ * User messages can have text parts.
302
+ */
303
+ parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
304
+ }
305
+ type UIMessage = Message & {
306
+ /**
307
+ * The parts of the message. Use this for rendering the message in the UI.
308
+ *
309
+ * Assistant messages can have text, reasoning and tool invocation parts.
310
+ * User messages can have text parts.
311
+ */
312
+ parts: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
313
+ };
314
+ /**
315
+ * A text part of a message.
316
+ */
317
+ type TextUIPart = {
318
+ type: 'text';
319
+ /**
320
+ * The text content.
321
+ */
322
+ text: string;
323
+ };
324
+ /**
325
+ * A reasoning part of a message.
326
+ */
327
+ type ReasoningUIPart = {
328
+ type: 'reasoning';
329
+ /**
330
+ * The reasoning text.
331
+ */
332
+ reasoning: string;
333
+ details: Array<{
334
+ type: 'text';
335
+ text: string;
336
+ signature?: string;
337
+ } | {
338
+ type: 'redacted';
339
+ data: string;
340
+ }>;
341
+ };
342
+ /**
343
+ * A tool invocation part of a message.
344
+ */
345
+ type ToolInvocationUIPart = {
346
+ type: 'tool-invocation';
347
+ /**
348
+ * The tool invocation.
349
+ */
350
+ toolInvocation: ToolInvocation;
351
+ };
352
+ /**
353
+ * A source part of a message.
354
+ */
355
+ type SourceUIPart = {
356
+ type: 'source';
357
+ /**
358
+ * The source.
359
+ */
360
+ source: LanguageModelV2Source;
361
+ };
362
+ /**
363
+ * A file part of a message.
364
+ */
365
+ type FileUIPart = {
366
+ type: 'file';
367
+ /**
368
+ * IANA media type of the file.
369
+ *
370
+ * @see https://www.iana.org/assignments/media-types/media-types.xhtml
371
+ */
372
+ mediaType: string;
373
+ /**
374
+ * The base64 encoded data.
375
+ */
376
+ data: string;
377
+ };
378
+ /**
379
+ * A step boundary part of a message.
380
+ */
381
+ type StepStartUIPart = {
382
+ type: 'step-start';
383
+ };
384
+ type CreateMessage = Omit<Message, 'id'> & {
385
+ id?: Message['id'];
386
+ };
387
+ type ChatRequest = {
388
+ /**
389
+ An optional object of headers to be passed to the API endpoint.
390
+ */
391
+ headers?: Record<string, string> | Headers;
392
+ /**
393
+ An optional object to be passed to the API endpoint.
394
+ */
395
+ body?: object;
396
+ /**
397
+ The messages of the chat.
398
+ */
399
+ messages: Message[];
400
+ /**
401
+ Additional data to be sent to the server.
402
+ */
403
+ data?: JSONValue;
404
+ };
405
+ type RequestOptions = {
406
+ /**
407
+ An optional object of headers to be passed to the API endpoint.
408
+ */
409
+ headers?: Record<string, string> | Headers;
410
+ /**
411
+ An optional object to be passed to the API endpoint.
412
+ */
413
+ body?: object;
414
+ };
415
+ type ChatRequestOptions = {
416
+ /**
417
+ Additional headers that should be to be passed to the API endpoint.
418
+ */
419
+ headers?: Record<string, string> | Headers;
420
+ /**
421
+ Additional body JSON properties that should be sent to the API endpoint.
422
+ */
423
+ body?: object;
424
+ /**
425
+ Additional data to be sent to the API endpoint.
426
+ */
427
+ data?: JSONValue;
428
+ /**
429
+ * Additional files to be sent to the server.
430
+ */
431
+ experimental_attachments?: FileList | Array<Attachment>;
432
+ /**
433
+ * Allow submitting an empty message. Defaults to `false`.
434
+ */
435
+ allowEmptySubmit?: boolean;
436
+ };
437
+ type UseChatOptions = {
438
+ /**
439
+ Keeps the last message when an error happens. Defaults to `true`.
440
+
441
+ @deprecated This option will be removed in the next major release.
442
+ */
443
+ keepLastMessageOnError?: boolean;
444
+ /**
445
+ * The API endpoint that accepts a `{ messages: Message[] }` object and returns
446
+ * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
447
+ */
448
+ api?: string;
449
+ /**
450
+ * A unique identifier for the chat. If not provided, a random one will be
451
+ * generated. When provided, the `useChat` hook with the same `id` will
452
+ * have shared states across components.
453
+ */
454
+ id?: string;
455
+ /**
456
+ * Initial messages of the chat. Useful to load an existing chat history.
457
+ */
458
+ initialMessages?: Message[];
459
+ /**
460
+ * Initial input of the chat.
461
+ */
462
+ initialInput?: string;
463
+ /**
464
+ Optional callback function that is invoked when a tool call is received.
465
+ Intended for automatic client-side tool execution.
466
+
467
+ You can optionally return a result for the tool call,
468
+ either synchronously or asynchronously.
469
+ */
470
+ onToolCall?: ({ toolCall, }: {
471
+ toolCall: ToolCall<string, unknown>;
472
+ }) => void | Promise<unknown> | unknown;
473
+ /**
474
+ * Callback function to be called when the API response is received.
475
+ */
476
+ onResponse?: (response: Response) => void | Promise<void>;
477
+ /**
478
+ * Optional callback function that is called when the assistant message is finished streaming.
479
+ *
480
+ * @param message The message that was streamed.
481
+ * @param options.usage The token usage of the message.
482
+ * @param options.finishReason The finish reason of the message.
483
+ */
484
+ onFinish?: (message: Message, options: {
485
+ usage: LanguageModelUsage;
486
+ finishReason: LanguageModelV2FinishReason;
487
+ }) => void;
488
+ /**
489
+ * Callback function to be called when an error is encountered.
490
+ */
491
+ onError?: (error: Error) => void;
492
+ /**
493
+ * A way to provide a function that is going to be used for ids for messages and the chat.
494
+ * If not provided the default AI SDK `generateId` is used.
495
+ */
496
+ generateId?: IdGenerator;
497
+ /**
498
+ * The credentials mode to be used for the fetch request.
499
+ * Possible values are: 'omit', 'same-origin', 'include'.
500
+ * Defaults to 'same-origin'.
501
+ */
502
+ credentials?: RequestCredentials;
503
+ /**
504
+ * HTTP headers to be sent with the API request.
505
+ */
506
+ headers?: Record<string, string> | Headers;
507
+ /**
508
+ * Extra body object to be sent with the API request.
509
+ * @example
510
+ * Send a `sessionId` to the API along with the messages.
511
+ * ```js
512
+ * useChat({
513
+ * body: {
514
+ * sessionId: '123',
515
+ * }
516
+ * })
517
+ * ```
518
+ */
519
+ body?: object;
520
+ /**
521
+ * Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
522
+ * Defaults to `false`. When set to `true`, the API endpoint might need to
523
+ * handle the extra fields before forwarding the request to the AI service.
524
+ */
525
+ sendExtraMessageFields?: boolean;
526
+ /**
527
+ Streaming protocol that is used. Defaults to `data`.
528
+ */
529
+ streamProtocol?: 'data' | 'text';
530
+ /**
531
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
532
+ or to provide a custom fetch implementation for e.g. testing.
533
+ */
534
+ fetch?: FetchFunction;
535
+ };
536
+ type UseCompletionOptions = {
537
+ /**
538
+ * The API endpoint that accepts a `{ prompt: string }` object and returns
539
+ * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
540
+ */
541
+ api?: string;
542
+ /**
543
+ * An unique identifier for the chat. If not provided, a random one will be
544
+ * generated. When provided, the `useChat` hook with the same `id` will
545
+ * have shared states across components.
546
+ */
547
+ id?: string;
548
+ /**
549
+ * Initial prompt input of the completion.
550
+ */
551
+ initialInput?: string;
552
+ /**
553
+ * Initial completion result. Useful to load an existing history.
554
+ */
555
+ initialCompletion?: string;
556
+ /**
557
+ * Callback function to be called when the API response is received.
558
+ */
559
+ onResponse?: (response: Response) => void | Promise<void>;
560
+ /**
561
+ * Callback function to be called when the completion is finished streaming.
562
+ */
563
+ onFinish?: (prompt: string, completion: string) => void;
564
+ /**
565
+ * Callback function to be called when an error is encountered.
566
+ */
567
+ onError?: (error: Error) => void;
568
+ /**
569
+ * The credentials mode to be used for the fetch request.
570
+ * Possible values are: 'omit', 'same-origin', 'include'.
571
+ * Defaults to 'same-origin'.
572
+ */
573
+ credentials?: RequestCredentials;
574
+ /**
575
+ * HTTP headers to be sent with the API request.
576
+ */
577
+ headers?: Record<string, string> | Headers;
578
+ /**
579
+ * Extra body object to be sent with the API request.
580
+ * @example
581
+ * Send a `sessionId` to the API along with the prompt.
582
+ * ```js
583
+ * useChat({
584
+ * body: {
585
+ * sessionId: '123',
586
+ * }
587
+ * })
588
+ * ```
589
+ */
590
+ body?: object;
591
+ /**
592
+ Streaming protocol that is used. Defaults to `data`.
593
+ */
594
+ streamProtocol?: 'data' | 'text';
595
+ /**
596
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
597
+ or to provide a custom fetch implementation for e.g. testing.
598
+ */
599
+ fetch?: FetchFunction;
600
+ };
601
+ /**
602
+ A JSON value can be a string, number, boolean, object, array, or null.
603
+ JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
604
+ */
605
+ type JSONValue = null | string | number | boolean | {
606
+ [value: string]: JSONValue;
607
+ } | Array<JSONValue>;
608
+
609
+ /**
610
+ Transcription model that is used by the AI SDK Core functions.
611
+ */
612
+ type TranscriptionModel = TranscriptionModelV1;
613
+ /**
614
+ Warning from the model provider for this call. The call will proceed, but e.g.
615
+ some settings might not be supported, which can lead to suboptimal results.
616
+ */
617
+ type TranscriptionWarning = TranscriptionModelV1CallWarning;
618
+
619
+ type TranscriptionModelResponseMetadata = {
620
+ /**
621
+ Timestamp for the start of the generated response.
622
+ */
623
+ timestamp: Date;
624
+ /**
625
+ The ID of the response model that was used to generate the response.
626
+ */
627
+ modelId: string;
628
+ /**
629
+ Response headers.
630
+ */
631
+ headers?: Record<string, string>;
632
+ };
633
+
634
+ /**
635
+ Speech model that is used by the AI SDK Core functions.
636
+ */
637
+ type SpeechModel = SpeechModelV1;
638
+ /**
639
+ Warning from the model provider for this call. The call will proceed, but e.g.
640
+ some settings might not be supported, which can lead to suboptimal results.
641
+ */
642
+ type SpeechWarning = SpeechModelV1CallWarning;
643
+
644
+ type SpeechModelResponseMetadata = {
645
+ /**
646
+ Timestamp for the start of the generated response.
647
+ */
648
+ timestamp: Date;
649
+ /**
650
+ The ID of the response model that was used to generate the response.
651
+ */
652
+ modelId: string;
653
+ /**
654
+ Response headers.
655
+ */
656
+ headers?: Record<string, string>;
657
+ /**
658
+ Response body.
659
+ */
660
+ body?: unknown;
661
+ };
662
+
663
+ declare const getOriginalFetch$1: () => typeof fetch;
664
+ declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, restoreMessagesOnFailure, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, }: {
665
+ api: string;
666
+ body: Record<string, any>;
667
+ streamProtocol: 'data' | 'text' | undefined;
668
+ credentials: RequestCredentials | undefined;
669
+ headers: HeadersInit | undefined;
670
+ abortController: (() => AbortController | null) | undefined;
671
+ restoreMessagesOnFailure: () => void;
672
+ onResponse: ((response: Response) => void | Promise<void>) | undefined;
673
+ onUpdate: (options: {
674
+ message: UIMessage;
675
+ data: JSONValue[] | undefined;
676
+ replaceLastMessage: boolean;
677
+ }) => void;
678
+ onFinish: UseChatOptions['onFinish'];
679
+ onToolCall: UseChatOptions['onToolCall'];
680
+ generateId: IdGenerator;
681
+ fetch: ReturnType<typeof getOriginalFetch$1> | undefined;
682
+ lastMessage: UIMessage | undefined;
683
+ }): Promise<void>;
684
+
685
+ declare const getOriginalFetch: () => typeof fetch;
686
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onResponse, onFinish, onError, onData, fetch, }: {
687
+ api: string;
688
+ prompt: string;
689
+ credentials: RequestCredentials | undefined;
690
+ headers: HeadersInit | undefined;
691
+ body: Record<string, any>;
692
+ streamProtocol: 'data' | 'text' | undefined;
693
+ setCompletion: (completion: string) => void;
694
+ setLoading: (loading: boolean) => void;
695
+ setError: (error: Error | undefined) => void;
696
+ setAbortController: (abortController: AbortController | null) => void;
697
+ onResponse: ((response: Response) => void | Promise<void>) | undefined;
698
+ onFinish: ((prompt: string, completion: string) => void) | undefined;
699
+ onError: ((error: Error) => void) | undefined;
700
+ onData: ((data: JSONValue[]) => void) | undefined;
701
+ fetch: ReturnType<typeof getOriginalFetch> | undefined;
702
+ }): Promise<string | null | undefined>;
703
+
704
+ type DataStreamString = `${(typeof DataStreamStringPrefixes)[keyof typeof DataStreamStringPrefixes]}:${string}\n`;
705
+ interface DataStreamPart<CODE extends string, NAME extends string, TYPE> {
706
+ code: CODE;
707
+ name: NAME;
708
+ parse: (value: JSONValue) => {
709
+ type: NAME;
710
+ value: TYPE;
711
+ };
712
+ }
713
+ declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, DataStreamPart<"2", "data", JSONValue[]>, DataStreamPart<"3", "error", string>, DataStreamPart<"8", "message_annotations", JSONValue[]>, DataStreamPart<"9", "tool_call", ToolCall<string, any>>, DataStreamPart<"a", "tool_result", Omit<ToolResult<string, any, any>, "args" | "toolName">>, DataStreamPart<"b", "tool_call_streaming_start", {
714
+ toolCallId: string;
715
+ toolName: string;
716
+ }>, DataStreamPart<"c", "tool_call_delta", {
717
+ toolCallId: string;
718
+ argsTextDelta: string;
719
+ }>, DataStreamPart<"d", "finish_message", {
720
+ finishReason: LanguageModelV2FinishReason;
721
+ usage?: {
722
+ promptTokens: number;
723
+ completionTokens: number;
724
+ };
725
+ }>, DataStreamPart<"e", "finish_step", {
726
+ isContinued: boolean;
727
+ finishReason: LanguageModelV2FinishReason;
728
+ usage?: {
729
+ promptTokens: number;
730
+ completionTokens: number;
731
+ };
732
+ }>, DataStreamPart<"f", "start_step", {
733
+ messageId: string;
734
+ }>, DataStreamPart<"g", "reasoning", string>, DataStreamPart<"h", "source", LanguageModelV2Source>, DataStreamPart<"i", "redacted_reasoning", {
735
+ data: string;
736
+ }>, DataStreamPart<"j", "reasoning_signature", {
737
+ signature: string;
738
+ }>, DataStreamPart<"k", "file", {
739
+ data: string;
740
+ mimeType: string;
741
+ }>];
742
+ type DataStreamParts = (typeof dataStreamParts)[number];
743
+ /**
744
+ * Maps the type of a stream part to its value type.
745
+ */
746
+ type DataStreamPartValueType = {
747
+ [P in DataStreamParts as P['name']]: ReturnType<P['parse']>['value'];
748
+ };
749
+ type DataStreamPartType = ReturnType<DataStreamParts['parse']>;
750
+ /**
751
+ * The map of prefixes for data in the stream
752
+ *
753
+ * - 0: Text from the LLM response
754
+ * - 1: (OpenAI) function_call responses
755
+ * - 2: custom JSON added by the user using `Data`
756
+ * - 6: (OpenAI) tool_call responses
757
+ *
758
+ * Example:
759
+ * ```
760
+ * 0:Vercel
761
+ * 0:'s
762
+ * 0: AI
763
+ * 0: AI
764
+ * 0: SDK
765
+ * 0: is great
766
+ * 0:!
767
+ * 2: { "someJson": "value" }
768
+ * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
769
+ * 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
770
+ *```
771
+ */
772
+ declare const DataStreamStringPrefixes: { [K in DataStreamParts["name"]]: (typeof dataStreamParts)[number]["code"]; };
773
+ /**
774
+ Parses a stream part from a string.
775
+
776
+ @param line The string to parse.
777
+ @returns The parsed stream part.
778
+ @throws An error if the string cannot be parsed.
779
+ */
780
+ declare const parseDataStreamPart: (line: string) => DataStreamPartType;
781
+ /**
782
+ Prepends a string with a prefix from the `StreamChunkPrefixes`, JSON-ifies it,
783
+ and appends a new line.
784
+
785
+ It ensures type-safety for the part type and value.
786
+ */
787
+ declare function formatDataStreamPart<T extends keyof DataStreamPartValueType>(type: T, value: DataStreamPartValueType[T]): DataStreamString;
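A rough illustration of the two helpers above, with the expected output inferred from the prefix table and doc comments rather than verified against the implementation:

  import { formatDataStreamPart, parseDataStreamPart } from 'ai';

  // '0' is the documented prefix for text parts, so this should produce '0:"Hello"\n'.
  const chunk = formatDataStreamPart('text', 'Hello');

  // Round-trip: parsing the line should yield { type: 'text', value: 'Hello' }.
  const part = parseDataStreamPart(chunk.trimEnd());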
788
+
789
+ /**
790
+ * Converts a data URL of type text/* to a text string.
791
+ */
792
+ declare function getTextFromDataUrl(dataUrl: string): string;
793
+
794
+ /**
795
+ Create a type from an object with all keys and nested keys set to optional.
796
+ The helper supports normal objects and Zod schemas (which are resolved automatically).
797
+ It always recurses into arrays.
798
+
799
+ Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
800
+ */
801
+ type DeepPartial<T> = T extends z.ZodTypeAny ? DeepPartialInternal<z.infer<T>> : DeepPartialInternal<T>;
802
+ type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
803
+ type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
804
+ type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
805
+ type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
806
+ type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
807
+ type PartialObject<ObjectType extends object> = {
808
+ [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
809
+ };
810
+
811
+ declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;
812
+
813
+ declare function fillMessageParts(messages: Message[]): UIMessage[];
814
+
815
+ declare function getMessageParts(message: Message | CreateMessage | UIMessage): (TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart)[];
816
+
817
+ /**
818
+ * Performs a deep-equal comparison of two parsed JSON objects.
819
+ *
820
+ * @param {any} obj1 - The first object to compare.
821
+ * @param {any} obj2 - The second object to compare.
822
+ * @returns {boolean} - Returns true if the two objects are deeply equal, false otherwise.
823
+ */
824
+ declare function isDeepEqualData(obj1: any, obj2: any): boolean;
825
+
826
+ declare function parsePartialJson(jsonText: string | undefined): {
827
+ value: JSONValue$1 | undefined;
828
+ state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
829
+ };
830
+
831
+ declare function prepareAttachmentsForRequest(attachmentsFromOptions: FileList | Array<Attachment> | undefined): Promise<Attachment[]>;
832
+
833
+ declare function processDataStream({ stream, onTextPart, onReasoningPart, onReasoningSignaturePart, onRedactedReasoningPart, onSourcePart, onFilePart, onDataPart, onErrorPart, onToolCallStreamingStartPart, onToolCallDeltaPart, onToolCallPart, onToolResultPart, onMessageAnnotationsPart, onFinishMessagePart, onFinishStepPart, onStartStepPart, }: {
834
+ stream: ReadableStream<Uint8Array>;
835
+ onTextPart?: (streamPart: (DataStreamPartType & {
836
+ type: 'text';
837
+ })['value']) => Promise<void> | void;
838
+ onReasoningPart?: (streamPart: (DataStreamPartType & {
839
+ type: 'reasoning';
840
+ })['value']) => Promise<void> | void;
841
+ onReasoningSignaturePart?: (streamPart: (DataStreamPartType & {
842
+ type: 'reasoning_signature';
843
+ })['value']) => Promise<void> | void;
844
+ onRedactedReasoningPart?: (streamPart: (DataStreamPartType & {
845
+ type: 'redacted_reasoning';
846
+ })['value']) => Promise<void> | void;
847
+ onFilePart?: (streamPart: (DataStreamPartType & {
848
+ type: 'file';
849
+ })['value']) => Promise<void> | void;
850
+ onSourcePart?: (streamPart: (DataStreamPartType & {
851
+ type: 'source';
852
+ })['value']) => Promise<void> | void;
853
+ onDataPart?: (streamPart: (DataStreamPartType & {
854
+ type: 'data';
855
+ })['value']) => Promise<void> | void;
856
+ onErrorPart?: (streamPart: (DataStreamPartType & {
857
+ type: 'error';
858
+ })['value']) => Promise<void> | void;
859
+ onToolCallStreamingStartPart?: (streamPart: (DataStreamPartType & {
860
+ type: 'tool_call_streaming_start';
861
+ })['value']) => Promise<void> | void;
862
+ onToolCallDeltaPart?: (streamPart: (DataStreamPartType & {
863
+ type: 'tool_call_delta';
864
+ })['value']) => Promise<void> | void;
865
+ onToolCallPart?: (streamPart: (DataStreamPartType & {
866
+ type: 'tool_call';
867
+ })['value']) => Promise<void> | void;
868
+ onToolResultPart?: (streamPart: (DataStreamPartType & {
869
+ type: 'tool_result';
870
+ })['value']) => Promise<void> | void;
871
+ onMessageAnnotationsPart?: (streamPart: (DataStreamPartType & {
872
+ type: 'message_annotations';
873
+ })['value']) => Promise<void> | void;
874
+ onFinishMessagePart?: (streamPart: (DataStreamPartType & {
875
+ type: 'finish_message';
876
+ })['value']) => Promise<void> | void;
877
+ onFinishStepPart?: (streamPart: (DataStreamPartType & {
878
+ type: 'finish_step';
879
+ })['value']) => Promise<void> | void;
880
+ onStartStepPart?: (streamPart: (DataStreamPartType & {
881
+ type: 'start_step';
882
+ })['value']) => Promise<void> | void;
883
+ }): Promise<void>;
884
+
885
+ declare function processTextStream({ stream, onTextPart, }: {
886
+ stream: ReadableStream<Uint8Array>;
887
+ onTextPart: (chunk: string) => Promise<void> | void;
888
+ }): Promise<void>;
889
+
890
+ /**
891
+ * Used to mark schemas so we can support both Zod and custom schemas.
892
+ */
893
+ declare const schemaSymbol: unique symbol;
894
+ type Schema<OBJECT = unknown> = Validator<OBJECT> & {
895
+ /**
896
+ * Used to mark schemas so we can support both Zod and custom schemas.
897
+ */
898
+ [schemaSymbol]: true;
899
+ /**
900
+ * Schema type for inference.
901
+ */
902
+ _type: OBJECT;
903
+ /**
904
+ * The JSON Schema for the schema. It is passed to the providers.
905
+ */
906
+ readonly jsonSchema: JSONSchema7;
907
+ };
908
+ /**
909
+ * Create a schema using a JSON Schema.
910
+ *
911
+ * @param jsonSchema The JSON Schema for the schema.
912
+ * @param options.validate Optional. A validation function for the schema.
913
+ */
914
+ declare function jsonSchema<OBJECT = unknown>(jsonSchema: JSONSchema7, { validate, }?: {
915
+ validate?: (value: unknown) => {
916
+ success: true;
917
+ value: OBJECT;
918
+ } | {
919
+ success: false;
920
+ error: Error;
921
+ };
922
+ }): Schema<OBJECT>;
923
+ declare function asSchema<OBJECT>(schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT> | undefined): Schema<OBJECT>;
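A sketch of defining a custom schema with `jsonSchema` as declared above (the example object shape and property names are illustrative):

  import { jsonSchema } from 'ai';

  // JSON Schema-based schema with an inferred TypeScript type.
  const recipeSchema = jsonSchema<{ name: string; steps: string[] }>({
    type: 'object',
    properties: {
      name: { type: 'string' },
      steps: { type: 'array', items: { type: 'string' } },
    },
    required: ['name', 'steps'],
  });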
924
+
925
+ declare function shouldResubmitMessages({ originalMaxToolInvocationStep, originalMessageCount, maxSteps, messages, }: {
926
+ originalMaxToolInvocationStep: number | undefined;
927
+ originalMessageCount: number;
928
+ maxSteps: number;
929
+ messages: UIMessage[];
930
+ }): boolean;
931
+ /**
932
+ Check if the message is an assistant message with completed tool calls.
933
+ The last step of the message must have at least one tool invocation and
934
+ all tool invocations must have a result.
935
+ */
936
+ declare function isAssistantMessageWithCompletedToolCalls(message: UIMessage): message is UIMessage & {
937
+ role: 'assistant';
938
+ };
939
+
940
+ /**
941
+ * Updates the result of a specific tool invocation in the last message of the given messages array.
942
+ *
943
+ * @param {object} params - The parameters object.
944
+ * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
945
+ * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
946
+ * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
947
+ * @returns {void} This function does not return anything.
948
+ */
949
+ declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
950
+ messages: UIMessage[];
951
+ toolCallId: string;
952
+ toolResult: unknown;
953
+ }): void;
954
+
955
+ declare function zodSchema<OBJECT>(zodSchema: z.Schema<OBJECT, z.ZodTypeDef, any>, options?: {
956
+ /**
957
+ * Enables support for references in the schema.
958
+ * This is required for recursive schemas, e.g. with `z.lazy`.
959
+ * However, not all language models and providers support such references.
960
+ * Defaults to `false`.
961
+ */
962
+ useReferences?: boolean;
963
+ }): Schema<OBJECT>;
964
+
965
+ interface DataStreamWriter {
966
+ /**
967
+ * Appends a data part to the stream.
968
+ */
969
+ write(data: DataStreamString): void;
970
+ /**
971
+ * Appends a data part to the stream.
972
+ */
973
+ writeData(value: JSONValue$1): void;
974
+ /**
975
+ * Appends a message annotation to the stream.
976
+ */
977
+ writeMessageAnnotation(value: JSONValue$1): void;
978
+ /**
979
+ * Appends a source part to the stream.
980
+ */
981
+ writeSource(source: Source): void;
982
+ /**
983
+ * Merges the contents of another stream to this stream.
984
+ */
985
+ merge(stream: ReadableStream<DataStreamString>): void;
986
+ /**
987
+ * Error handler that is used by the data stream writer.
988
+ * This is intended for forwarding when merging streams
989
+ * to prevent duplicated error masking.
990
+ */
991
+ onError: ((error: unknown) => string) | undefined;
992
+ }
993
+
994
+ declare function createDataStream({ execute, onError, }: {
995
+ execute: (dataStream: DataStreamWriter) => Promise<void> | void;
996
+ onError?: (error: unknown) => string;
997
+ }): ReadableStream<DataStreamString>;
998
+
999
+ declare function createDataStreamResponse({ status, statusText, headers, execute, onError, }: ResponseInit & {
1000
+ execute: (dataStream: DataStreamWriter) => Promise<void> | void;
1001
+ onError?: (error: unknown) => string;
1002
+ }): Response;
1003
+
1004
+ declare function pipeDataStreamToResponse(response: ServerResponse, { status, statusText, headers, execute, onError, }: ResponseInit & {
1005
+ execute: (writer: DataStreamWriter) => Promise<void> | void;
1006
+ onError?: (error: unknown) => string;
1007
+ }): void;
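For orientation, a sketch of the data stream helpers declared above inside a web route handler (the handler framing is illustrative, not prescribed by the package):

  import { createDataStreamResponse } from 'ai';

  export function GET(): Response {
    return createDataStreamResponse({
      status: 200,
      execute: dataStream => {
        dataStream.writeData({ status: 'processing' });          // custom data part
        dataStream.writeMessageAnnotation({ quality: 'draft' }); // message annotation
      },
      onError: error => `Stream error: ${String(error)}`,
    });
  }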
251
1008
 
252
1009
  /**
253
- Represents the number of tokens used in a prompt and completion.
1010
+ * Telemetry configuration.
254
1011
  */
255
- type LanguageModelUsage = {
1012
+ type TelemetrySettings = {
256
1013
  /**
257
- The number of tokens used in the prompt.
1014
+ * Enable or disable telemetry. Disabled by default while experimental.
258
1015
  */
259
- promptTokens: number;
1016
+ isEnabled?: boolean;
260
1017
  /**
261
- The number of tokens used in the completion.
262
- */
263
- completionTokens: number;
1018
+ * Enable or disable input recording. Enabled by default.
1019
+ *
1020
+ * You might want to disable input recording to avoid recording sensitive
1021
+ * information, to reduce data transfers, or to increase performance.
1022
+ */
1023
+ recordInputs?: boolean;
264
1024
  /**
265
- The total number of tokens used (promptTokens + completionTokens).
1025
+ * Enable or disable output recording. Enabled by default.
1026
+ *
1027
+ * You might want to disable output recording to avoid recording sensitive
1028
+ * information, to reduce data transfers, or to increase performance.
266
1029
  */
267
- totalTokens: number;
268
- };
269
- /**
270
- Represents the number of tokens used in an embedding.
271
- */
272
- type EmbeddingModelUsage = {
1030
+ recordOutputs?: boolean;
273
1031
  /**
274
- The number of tokens used in the embedding.
1032
+ * Identifier for this function. Used to group telemetry data by function.
275
1033
  */
276
- tokens: number;
1034
+ functionId?: string;
1035
+ /**
1036
+ * Additional information to include in the telemetry data.
1037
+ */
1038
+ metadata?: Record<string, AttributeValue>;
1039
+ /**
1040
+ * A custom tracer to use for the telemetry data.
1041
+ */
1042
+ tracer?: Tracer;
277
1043
  };
278
1044
 
279
1045
  /**
@@ -294,13 +1060,17 @@ interface EmbedResult<VALUE> {
294
1060
  */
295
1061
  readonly usage: EmbeddingModelUsage;
296
1062
  /**
297
- Optional raw response data.
1063
+ Optional response data.
298
1064
  */
299
- readonly rawResponse?: {
1065
+ readonly response?: {
300
1066
  /**
301
1067
  Response headers.
302
1068
  */
303
1069
  headers?: Record<string, string>;
1070
+ /**
1071
+ The response body.
1072
+ */
1073
+ body?: unknown;
304
1074
  };
305
1075
  }
306
1076
 
@@ -316,7 +1086,7 @@ Embed a value using an embedding model. The type of the value is defined by the
316
1086
 
317
1087
  @returns A result object that contains the embedding, the value, and additional information.
318
1088
  */
319
- declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
1089
+ declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
320
1090
  /**
321
1091
  The embedding model to use.
322
1092
  */
@@ -340,6 +1110,12 @@ declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSi
340
1110
  Only applicable for HTTP-based providers.
341
1111
  */
342
1112
  headers?: Record<string, string>;
1113
+ /**
1114
+ Additional provider-specific options. They are passed through
1115
+ to the provider from the AI SDK and enable provider-specific
1116
+ functionality that can be fully encapsulated in the provider.
1117
+ */
1118
+ providerOptions?: ProviderOptions;
343
1119
  /**
344
1120
  * Optional telemetry configuration (experimental).
345
1121
  */
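A sketch of the extended `embed` call with the new `providerOptions` field (the model variable and the provider option namespace/keys are placeholders, not real provider options):

  import { embed, type EmbeddingModel } from 'ai';

  declare const embeddingModel: EmbeddingModel<string>;

  async function embedExample() {
    const { embedding, usage } = await embed({
      model: embeddingModel,
      value: 'sunny day at the beach',
      providerOptions: {
        exampleProvider: { dimensions: 256 }, // hypothetical provider-specific option
      },
    });
    return { embedding, usage };
  }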
@@ -363,6 +1139,19 @@ interface EmbedManyResult<VALUE> {
363
1139
  The embedding token usage.
364
1140
  */
365
1141
  readonly usage: EmbeddingModelUsage;
1142
+ /**
1143
+ Optional raw response data.
1144
+ */
1145
+ readonly responses?: Array<{
1146
+ /**
1147
+ Response headers.
1148
+ */
1149
+ headers?: Record<string, string>;
1150
+ /**
1151
+ The response body.
1152
+ */
1153
+ body?: unknown;
1154
+ } | undefined>;
366
1155
  }
367
1156
 
368
1157
  /**
@@ -381,7 +1170,7 @@ has a limit on how many embeddings can be generated in a single call.
381
1170
 
382
1171
  @returns A result object that contains the embeddings, the value, and additional information.
383
1172
  */
384
- declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
1173
+ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
385
1174
  /**
386
1175
  The embedding model to use.
387
1176
  */
@@ -409,13 +1198,19 @@ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, ab
409
1198
  * Optional telemetry configuration (experimental).
410
1199
  */
411
1200
  experimental_telemetry?: TelemetrySettings;
1201
+ /**
1202
+ Additional provider-specific options. They are passed through
1203
+ to the provider from the AI SDK and enable provider-specific
1204
+ functionality that can be fully encapsulated in the provider.
1205
+ */
1206
+ providerOptions?: ProviderOptions;
412
1207
  }): Promise<EmbedManyResult<VALUE>>;
413
1208
 
414
1209
  type CallSettings = {
415
1210
  /**
416
1211
  Maximum number of tokens to generate.
417
1212
  */
418
- maxTokens?: number;
1213
+ maxOutputTokens?: number;
419
1214
  /**
420
1215
  Temperature setting. This is a number between 0 (almost no randomness) and
421
1216
  1 (very random).
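Because `CallSettings` feeds the core generation calls, the `maxTokens` to `maxOutputTokens` rename surfaces at call sites roughly like this (a sketch; `generateText` and its options are assumed to accept `CallSettings` as before, and model setup is omitted):

  import { generateText, type LanguageModel } from 'ai';

  declare const model: LanguageModel;

  async function summarize(input: string) {
    const { text } = await generateText({
      model,
      prompt: `Summarize: ${input}`,
      maxOutputTokens: 256, // previously `maxTokens`
      temperature: 0.3,
    });
    return text;
  }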
@@ -496,6 +1291,10 @@ type ToolResultContent = Array<{
496
1291
  } | {
497
1292
  type: 'image';
498
1293
  data: string;
1294
+ mediaType?: string;
1295
+ /**
1296
+ * @deprecated Use `mediaType` instead.
1297
+ */
499
1298
  mimeType?: string;
500
1299
  }>;
501
1300
 
@@ -514,10 +1313,6 @@ interface TextPart {
514
1313
  functionality that can be fully encapsulated in the provider.
515
1314
  */
516
1315
  providerOptions?: ProviderOptions;
517
- /**
518
- @deprecated Use `providerOptions` instead.
519
- */
520
- experimental_providerMetadata?: ProviderMetadata;
521
1316
  }
522
1317
  /**
523
1318
  Image content part of a prompt. It contains an image.
@@ -532,7 +1327,13 @@ interface ImagePart {
532
1327
  */
533
1328
  image: DataContent | URL;
534
1329
  /**
535
- Optional mime type of the image.
1330
+ Optional IANA media type of the image.
1331
+
1332
+ @see https://www.iana.org/assignments/media-types/media-types.xhtml
1333
+ */
1334
+ mediaType?: string;
1335
+ /**
1336
+ @deprecated Use `mediaType` instead.
536
1337
  */
537
1338
  mimeType?: string;
538
1339
  /**
@@ -541,10 +1342,6 @@ interface ImagePart {
541
1342
  functionality that can be fully encapsulated in the provider.
542
1343
  */
543
1344
  providerOptions?: ProviderOptions;
544
- /**
545
- @deprecated Use `providerOptions` instead.
546
- */
547
- experimental_providerMetadata?: ProviderMetadata;
548
1345
  }
549
1346
  /**
550
1347
  File content part of a prompt. It contains a file.
@@ -563,19 +1360,21 @@ interface FilePart {
563
1360
  */
564
1361
  filename?: string;
565
1362
  /**
566
- Mime type of the file.
1363
+ IANA media type of the file.
1364
+
1365
+ @see https://www.iana.org/assignments/media-types/media-types.xhtml
567
1366
  */
568
- mimeType: string;
1367
+ mediaType: string;
1368
+ /**
1369
+ @deprecated Use `mediaType` instead.
1370
+ */
1371
+ mimeType?: string;
569
1372
  /**
570
1373
  Additional provider-specific metadata. They are passed through
571
1374
  to the provider from the AI SDK and enable provider-specific
572
1375
  functionality that can be fully encapsulated in the provider.
573
1376
  */
574
1377
  providerOptions?: ProviderOptions;
575
- /**
576
- @deprecated Use `providerOptions` instead.
577
- */
578
- experimental_providerMetadata?: ProviderMetadata;
579
1378
  }
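A sketch of a user message carrying a file part with the renamed `mediaType` field (the base64 payload is truncated and illustrative; fields other than `mediaType` and `filename` are assumed to follow the package's `FilePart` declaration):

  import type { CoreUserMessage } from 'ai';

  const message: CoreUserMessage = {
    role: 'user',
    content: [
      { type: 'text', text: 'Please summarize the attached report.' },
      {
        type: 'file',
        data: 'JVBERi0xLjcK',            // base64 file data (truncated)
        mediaType: 'application/pdf',    // replaces the deprecated mimeType
        filename: 'report.pdf',
      },
    ],
  };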
580
1379
  /**
581
1380
  * Reasoning content part of a prompt. It contains a reasoning.
@@ -596,10 +1395,6 @@ interface ReasoningPart {
596
1395
  functionality that can be fully encapsulated in the provider.
597
1396
  */
598
1397
  providerOptions?: ProviderOptions;
599
- /**
600
- @deprecated Use `providerOptions` instead.
601
- */
602
- experimental_providerMetadata?: ProviderMetadata;
603
1398
  }
604
1399
  /**
605
1400
  Redacted reasoning content part of a prompt.
@@ -616,10 +1411,6 @@ interface RedactedReasoningPart {
616
1411
  functionality that can be fully encapsulated in the provider.
617
1412
  */
618
1413
  providerOptions?: ProviderOptions;
619
- /**
620
- @deprecated Use `providerOptions` instead.
621
- */
622
- experimental_providerMetadata?: ProviderMetadata;
623
1414
  }
624
1415
  /**
625
1416
  Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
@@ -644,10 +1435,6 @@ interface ToolCallPart {
644
1435
  functionality that can be fully encapsulated in the provider.
645
1436
  */
646
1437
  providerOptions?: ProviderOptions;
647
- /**
648
- @deprecated Use `providerOptions` instead.
649
- */
650
- experimental_providerMetadata?: ProviderMetadata;
651
1438
  }
652
1439
  /**
653
1440
  Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
@@ -680,10 +1467,6 @@ interface ToolResultPart {
680
1467
  functionality that can be fully encapsulated in the provider.
681
1468
  */
682
1469
  providerOptions?: ProviderOptions;
683
- /**
684
- @deprecated Use `providerOptions` instead.
685
- */
686
- experimental_providerMetadata?: ProviderMetadata;
687
1470
  }
688
1471
 
689
1472
  /**
@@ -702,10 +1485,6 @@ type CoreSystemMessage = {
702
1485
  functionality that can be fully encapsulated in the provider.
703
1486
  */
704
1487
  providerOptions?: ProviderOptions;
705
- /**
706
- @deprecated Use `providerOptions` instead.
707
- */
708
- experimental_providerMetadata?: ProviderMetadata;
709
1488
  };
710
1489
  declare const coreSystemMessageSchema: z.ZodType<CoreSystemMessage>;
711
1490
  /**
@@ -720,10 +1499,6 @@ type CoreUserMessage = {
720
1499
  functionality that can be fully encapsulated in the provider.
721
1500
  */
722
1501
  providerOptions?: ProviderOptions;
723
- /**
724
- @deprecated Use `providerOptions` instead.
725
- */
726
- experimental_providerMetadata?: ProviderMetadata;
727
1502
  };
728
1503
  declare const coreUserMessageSchema: z.ZodType<CoreUserMessage>;
729
1504
  /**
@@ -742,10 +1517,6 @@ type CoreAssistantMessage = {
742
1517
  functionality that can be fully encapsulated in the provider.
743
1518
  */
744
1519
  providerOptions?: ProviderOptions;
745
- /**
746
- @deprecated Use `providerOptions` instead.
747
- */
748
- experimental_providerMetadata?: ProviderMetadata;
749
1520
  };
750
1521
  declare const coreAssistantMessageSchema: z.ZodType<CoreAssistantMessage>;
751
1522
  /**
@@ -765,10 +1536,6 @@ type CoreToolMessage = {
765
1536
  functionality that can be fully encapsulated in the provider.
766
1537
  */
767
1538
  providerOptions?: ProviderOptions;
768
- /**
769
- @deprecated Use `providerOptions` instead.
770
- */
771
- experimental_providerMetadata?: ProviderMetadata;
772
1539
  };
773
1540
  declare const coreToolMessageSchema: z.ZodType<CoreToolMessage>;
774
1541
  /**
@@ -814,12 +1581,14 @@ interface GeneratedFile {
814
1581
  */
815
1582
  readonly uint8Array: Uint8Array;
816
1583
  /**
817
- MIME type of the file
1584
+ The IANA media type of the file.
1585
+
1586
+ @see https://www.iana.org/assignments/media-types/media-types.xhtml
818
1587
  */
819
- readonly mimeType: string;
1588
+ readonly mediaType: string;
820
1589
  }
821
1590
 
822
- type ReasoningDetail = {
1591
+ type Reasoning = {
823
1592
  type: 'text';
824
1593
  text: string;
825
1594
  signature?: string;
@@ -828,133 +1597,6 @@ type ReasoningDetail = {
828
1597
  data: string;
829
1598
  };
830
1599
 
831
- type ToolParameters = z.ZodTypeAny | Schema<any>;
832
- type inferParameters<PARAMETERS extends ToolParameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
833
- interface ToolExecutionOptions {
834
- /**
835
- * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
836
- */
837
- toolCallId: string;
838
- /**
839
- * Messages that were sent to the language model to initiate the response that contained the tool call.
840
- * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
841
- */
842
- messages: CoreMessage[];
843
- /**
844
- * An optional abort signal that indicates that the overall operation should be aborted.
845
- */
846
- abortSignal?: AbortSignal;
847
- }
848
- /**
849
- A tool contains the description and the schema of the input that the tool expects.
850
- This enables the language model to generate the input.
851
-
852
- The tool can also contain an optional execute function for the actual execution function of the tool.
853
- */
854
- type Tool<PARAMETERS extends ToolParameters = any, RESULT = any> = {
855
- /**
856
- The schema of the input that the tool expects. The language model will use this to generate the input.
857
- It is also used to validate the output of the language model.
858
- Use descriptions to make the input understandable for the language model.
859
- */
860
- parameters: PARAMETERS;
861
- /**
862
- An optional description of what the tool does.
863
- Will be used by the language model to decide whether to use the tool.
864
- Not used for provider-defined tools.
865
- */
866
- description?: string;
867
- /**
868
- Optional conversion function that maps the tool result to multi-part tool content for LLMs.
869
- */
870
- experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
871
- /**
872
- An async function that is called with the arguments from the tool call and produces a result.
873
- If not provided, the tool will not be executed automatically.
874
-
875
- @args is the input of the tool call.
876
- @options.abortSignal is a signal that can be used to abort the tool call.
877
- */
878
- execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
879
- } & ({
880
- /**
881
- Function tool.
882
- */
883
- type?: undefined | 'function';
884
- } | {
885
- /**
886
- Provider-defined tool.
887
- */
888
- type: 'provider-defined';
889
- /**
890
- The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
891
- */
892
- id: `${string}.${string}`;
893
- /**
894
- The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
895
- */
896
- args: Record<string, unknown>;
897
- });
898
- /**
899
- * @deprecated Use `Tool` instead.
900
- */
901
- type CoreTool<PARAMETERS extends ToolParameters = any, RESULT = any> = Tool<PARAMETERS, RESULT>;
902
- /**
903
- Helper function for inferring the execute args of a tool.
904
- */
905
- declare function tool<PARAMETERS extends ToolParameters, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
906
- execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
907
- }): Tool<PARAMETERS, RESULT> & {
908
- execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
909
- };
910
- declare function tool<PARAMETERS extends ToolParameters, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
911
- execute?: undefined;
912
- }): Tool<PARAMETERS, RESULT> & {
913
- execute: undefined;
914
- };
915
-
916
- /**
917
- Create a union of the given object's values, and optionally specify which keys to get the values from.
918
-
919
- Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
920
-
921
- @example
922
- ```
923
- // data.json
924
- {
925
- 'foo': 1,
926
- 'bar': 2,
927
- 'biz': 3
928
- }
929
-
930
- // main.ts
931
- import type {ValueOf} from 'type-fest';
932
- import data = require('./data.json');
933
-
934
- export function getData(name: string): ValueOf<typeof data> {
935
- return data[name];
936
- }
937
-
938
- export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
939
- return data[name];
940
- }
941
-
942
- // file.ts
943
- import {getData, onlyBar} from './main';
944
-
945
- getData('foo');
946
- //=> 1
947
-
948
- onlyBar('foo');
949
- //=> TypeError ...
950
-
951
- onlyBar('bar');
952
- //=> 2
953
- ```
954
- * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
955
- */
956
- type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
957
-
958
1600
  declare const JSONRPCRequestSchema: z.ZodObject<z.objectUtil.extendShape<{
959
1601
  jsonrpc: z.ZodLiteral<"2.0">;
960
1602
  id: z.ZodUnion<[z.ZodString, z.ZodNumber]>;
@@ -1218,25 +1860,99 @@ type MCPTransportConfig = {
1218
1860
  */
1219
1861
  url: string;
1220
1862
  /**
1221
- * Additional HTTP headers to be sent with requests.
1222
- */
1223
- headers?: Record<string, string>;
1224
- };
1863
+ * Additional HTTP headers to be sent with requests.
1864
+ */
1865
+ headers?: Record<string, string>;
1866
+ };
1867
+
1868
+ type ToolParameters<T = JSONObject> = z.Schema<T> | Schema<T>;
1869
+ interface ToolExecutionOptions {
1870
+ /**
1871
+ * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
1872
+ */
1873
+ toolCallId: string;
1874
+ /**
1875
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
1876
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
1877
+ */
1878
+ messages: CoreMessage[];
1879
+ /**
1880
+ * An optional abort signal that indicates that the overall operation should be aborted.
1881
+ */
1882
+ abortSignal?: AbortSignal;
1883
+ }
1884
+ type NeverOptional<N, T> = 0 extends 1 & N ? Partial<T> : [N] extends [never] ? Partial<Record<keyof T, undefined>> : T;
1885
+ /**
1886
+ A tool contains the description and the schema of the input that the tool expects.
1887
+ This enables the language model to generate the input.
1888
+
1889
+ The tool can also contain an optional execute function for the actual execution function of the tool.
1890
+ */
1891
+ type Tool<PARAMETERS extends JSONValue$1 | unknown | never = any, RESULT = any> = {
1892
+ /**
1893
+ An optional description of what the tool does.
1894
+ Will be used by the language model to decide whether to use the tool.
1895
+ Not used for provider-defined tools.
1896
+ */
1897
+ description?: string;
1898
+ } & NeverOptional<PARAMETERS, {
1899
+ /**
1900
+ The schema of the input that the tool expects. The language model will use this to generate the input.
1901
+ It is also used to validate the output of the language model.
1902
+ Use descriptions to make the input understandable for the language model.
1903
+ */
1904
+ parameters: ToolParameters<PARAMETERS>;
1905
+ }> & NeverOptional<RESULT, {
1906
+ /**
1907
+ An async function that is called with the arguments from the tool call and produces a result.
1908
+ If not provided, the tool will not be executed automatically.
1909
+
1910
+ @args is the input of the tool call.
1911
+ @options.abortSignal is a signal that can be used to abort the tool call.
1912
+ */
1913
+ execute: (args: [PARAMETERS] extends [never] ? undefined : PARAMETERS, options: ToolExecutionOptions) => PromiseLike<RESULT>;
1914
+ /**
1915
+ Optional conversion function that maps the tool result to multi-part tool content for LLMs.
1916
+ */
1917
+ experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
1918
+ }> & ({
1919
+ /**
1920
+ Function tool.
1921
+ */
1922
+ type?: undefined | 'function';
1923
+ } | {
1924
+ /**
1925
+ Provider-defined tool.
1926
+ */
1927
+ type: 'provider-defined';
1928
+ /**
1929
+ The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
1930
+ */
1931
+ id: `${string}.${string}`;
1932
+ /**
1933
+ The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
1934
+ */
1935
+ args: Record<string, unknown>;
1936
+ });
1937
+ /**
1938
+ Helper function for inferring the execute args of a tool.
1939
+ */
1940
+ declare function tool(tool: Tool<never, never>): Tool<never, never>;
1941
+ declare function tool<PARAMETERS>(tool: Tool<PARAMETERS, never>): Tool<PARAMETERS, never>;
1942
+ declare function tool<RESULT>(tool: Tool<never, RESULT>): Tool<never, RESULT>;
1943
+ declare function tool<PARAMETERS, RESULT>(tool: Tool<PARAMETERS, RESULT>): Tool<PARAMETERS, RESULT>;
1944
+ type MappedTool<T extends Tool | JSONObject, RESULT extends any> = T extends Tool<infer P> ? Tool<P, RESULT> : T extends JSONObject ? Tool<T, RESULT> : never;
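Editor's note: under the reworked `Tool` type, the `tool()` helper infers the `execute` arguments from a Zod schema (or an AI SDK `Schema`) passed as `parameters`. A minimal sketch with a hypothetical weather tool:

```ts
import { tool } from 'ai';
import { z } from 'zod';

// Hypothetical tool; `parameters` can be a Zod schema or an AI SDK Schema.
const weatherTool = tool({
  description: 'Get the current weather for a city.',
  parameters: z.object({
    city: z.string().describe('Name of the city'),
  }),
  // `args` is inferred from the schema; the second argument carries
  // `toolCallId`, `messages`, and an optional `abortSignal`.
  execute: async ({ city }, { toolCallId }) => {
    // Placeholder implementation; a real tool would call a weather API here.
    return { toolCallId, city, temperatureCelsius: 21 };
  },
});
```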
1225
1945
 
1226
1946
  type ToolSchemas = Record<string, {
1227
- parameters: ToolParameters;
1947
+ parameters: ToolParameters<JSONObject | unknown>;
1228
1948
  }> | 'automatic' | undefined;
1229
1949
  type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
1230
- parameters: ToolParameters;
1950
+ parameters: ToolParameters<any>;
1231
1951
  }> ? {
1232
- [K in keyof TOOL_SCHEMAS]: Tool<TOOL_SCHEMAS[K]['parameters'], CallToolResult> & {
1233
- execute: (args: inferParameters<TOOL_SCHEMAS[K]['parameters']>, options: ToolExecutionOptions) => PromiseLike<CallToolResult>;
1234
- };
1235
- } : {
1236
- [k: string]: Tool<z.ZodUnknown, CallToolResult> & {
1237
- execute: (args: unknown, options: ToolExecutionOptions) => PromiseLike<CallToolResult>;
1238
- };
1239
- };
1952
+ [K in keyof TOOL_SCHEMAS]: MappedTool<TOOL_SCHEMAS[K], CallToolResult> & Required<Pick<MappedTool<TOOL_SCHEMAS[K], CallToolResult>, 'execute'>>;
1953
+ } : McpToolSet<Record<string, {
1954
+ parameters: ToolParameters<unknown>;
1955
+ }>>;
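Editor's note: `ToolSchemas` and `McpToolSet` suggest per-tool schema narrowing for MCP tools. A hedged sketch of a schema record in that shape; the MCP client that would consume it (typically via a `tools({ schemas })` style call) is assumed and not part of this excerpt:

```ts
import { z } from 'zod';

// Hypothetical schema record in the ToolSchemas shape. When passed to an MCP
// client, each entry narrows the matching tool's `execute` arguments; omitting
// the record falls back to the 'automatic' (untyped) variant.
const schemas = {
  'read-file': {
    parameters: z.object({ path: z.string() }),
  },
  'list-directory': {
    parameters: z.object({
      path: z.string(),
      recursive: z.boolean().optional(),
    }),
  },
} satisfies Record<string, { parameters: z.ZodTypeAny }>;
```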
1240
1956
  declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendShape<{
1241
1957
  _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
1242
1958
  }, {
@@ -2000,27 +2716,60 @@ declare class MCPClient {
2000
2716
  private onResponse;
2001
2717
  }
2002
2718
 
2003
- type ToolSet = Record<string, Tool>;
2719
+ /**
2720
+ Create a union of the given object's values, and optionally specify which keys to get the values from.
2721
+
2722
+ Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
2723
+
2724
+ @example
2725
+ ```
2726
+ // data.json
2727
+ {
2728
+ 'foo': 1,
2729
+ 'bar': 2,
2730
+ 'biz': 3
2731
+ }
2732
+
2733
+ // main.ts
2734
+ import type {ValueOf} from 'type-fest';
2735
+ import data = require('./data.json');
2736
+
2737
+ export function getData(name: string): ValueOf<typeof data> {
2738
+ return data[name];
2739
+ }
2740
+
2741
+ export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
2742
+ return data[name];
2743
+ }
2744
+
2745
+ // file.ts
2746
+ import {getData, onlyBar} from './main';
2747
+
2748
+ getData('foo');
2749
+ //=> 1
2750
+
2751
+ onlyBar('foo');
2752
+ //=> TypeError ...
2753
+
2754
+ onlyBar('bar');
2755
+ //=> 2
2756
+ ```
2757
+ * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
2758
+ */
2759
+ type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
2760
+
2761
+ type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute'>>;
2004
2762
 
2005
2763
  type ToolCallUnion<TOOLS extends ToolSet> = ValueOf<{
2006
2764
  [NAME in keyof TOOLS]: {
2007
2765
  type: 'tool-call';
2008
2766
  toolCallId: string;
2009
2767
  toolName: NAME & string;
2010
- args: inferParameters<TOOLS[NAME]['parameters']>;
2768
+ args: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
2011
2769
  };
2012
2770
  }>;
2013
- /**
2014
- * @deprecated Use `ToolCallUnion` instead.
2015
- */
2016
- type CoreToolCallUnion<TOOLS extends ToolSet> = ToolCallUnion<ToolSet>;
2017
2771
  type ToolCallArray<TOOLS extends ToolSet> = Array<ToolCallUnion<TOOLS>>;
2018
2772
 
2019
- type ToToolsWithExecute<TOOLS extends ToolSet> = {
2020
- [K in keyof TOOLS as TOOLS[K] extends {
2021
- execute: any;
2022
- } ? K : never]: TOOLS[K];
2023
- };
2024
2773
  type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
2025
2774
  [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
2026
2775
  };
@@ -2029,15 +2778,11 @@ type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
2029
2778
  type: 'tool-result';
2030
2779
  toolCallId: string;
2031
2780
  toolName: NAME & string;
2032
- args: inferParameters<TOOLS[NAME]['parameters']>;
2781
+ args: TOOLS[NAME] extends Tool<infer P> ? P : never;
2033
2782
  result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
2034
2783
  };
2035
2784
  }>;
2036
- type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
2037
- /**
2038
- * @deprecated Use `ToolResultUnion` instead.
2039
- */
2040
- type CoreToolResultUnion<TOOLS extends ToolSet> = ToolResultUnion<TOOLS>;
2785
+ type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<TOOLS>>;
2041
2786
  type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;
2042
2787
 
2043
2788
  /**
@@ -2061,8 +2806,11 @@ type StepResult<TOOLS extends ToolSet> = {
2061
2806
  /**
2062
2807
  The reasoning that was generated during the generation.
2063
2808
  */
2064
- readonly reasoning: string | undefined;
2065
- readonly reasoningDetails: Array<ReasoningDetail>;
2809
+ readonly reasoning: Array<Reasoning>;
2810
+ /**
2811
+ The reasoning text that was generated during the generation.
2812
+ */
2813
+ readonly reasoningText: string | undefined;
2066
2814
  /**
2067
2815
  The files that were generated during the generation.
2068
2816
  */
@@ -2086,7 +2834,7 @@ type StepResult<TOOLS extends ToolSet> = {
2086
2834
  /**
2087
2835
  The token usage of the generated text.
2088
2836
  */
2089
- readonly usage: LanguageModelUsage;
2837
+ readonly usage: LanguageModelUsage$1;
2090
2838
  /**
2091
2839
  Warnings from the model provider (e.g. unsupported settings).
2092
2840
  */
@@ -2122,10 +2870,6 @@ type StepResult<TOOLS extends ToolSet> = {
2122
2870
  */
2123
2871
  readonly providerMetadata: ProviderMetadata | undefined;
2124
2872
  /**
2125
- @deprecated Use `providerMetadata` instead.
2126
- */
2127
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
2128
- /**
2129
2873
  The type of step that this result is for. The first step is always
2130
2874
  an "initial" step, and subsequent steps are either "continue" steps
2131
2875
  or "tool-result" steps.
@@ -2147,19 +2891,19 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2147
2891
  */
2148
2892
  readonly text: string;
2149
2893
  /**
2894
+ The full reasoning that the model has generated.
2895
+ */
2896
+ readonly reasoning: Array<Reasoning>;
2897
+ /**
2150
2898
  The reasoning text that the model has generated. Can be undefined if the model
2151
2899
  has only generated text.
2152
2900
  */
2153
- readonly reasoning: string | undefined;
2901
+ readonly reasoningText: string | undefined;
2154
2902
  /**
2155
2903
  The files that were generated. Empty array if no files were generated.
2156
2904
  */
2157
2905
  readonly files: Array<GeneratedFile>;
2158
2906
  /**
2159
- The full reasoning that the model has generated.
2160
- */
2161
- readonly reasoningDetails: Array<ReasoningDetail>;
2162
- /**
2163
2907
  Sources that have been used as input to generate the response.
2164
2908
  For multi-step generation, the sources are accumulated from all steps.
2165
2909
  */
@@ -2183,7 +2927,7 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2183
2927
  /**
2184
2928
  The token usage of the generated text.
2185
2929
  */
2186
- readonly usage: LanguageModelUsage;
2930
+ readonly usage: LanguageModelUsage$1;
2187
2931
  /**
2188
2932
  Warnings from the model provider (e.g. unsupported settings)
2189
2933
  */
@@ -2229,13 +2973,9 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2229
2973
  results that can be fully encapsulated in the provider.
2230
2974
  */
2231
2975
  readonly providerMetadata: ProviderMetadata | undefined;
2232
- /**
2233
- @deprecated Use `providerMetadata` instead.
2234
- */
2235
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
2236
2976
  }
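Editor's note: with this change, `reasoning` on results and steps becomes an array of reasoning parts and the flat string moves to `reasoningText`. A minimal sketch reading both from a `generateText` call, with a placeholder reasoning-capable model and `maxOutputTokens` replacing the former `maxTokens` setting:

```ts
import { generateText, type LanguageModel } from 'ai';

// Placeholder: any reasoning-capable provider model.
declare const reasoningModel: LanguageModel;

const result = await generateText({
  model: reasoningModel,
  prompt: 'How many weekdays are there between March 3 and March 17?',
  maxOutputTokens: 512, // formerly `maxTokens`
});

console.log(result.text);
// Flat reasoning string (may be undefined if the model emitted none).
console.log(result.reasoningText);
// Structured reasoning parts: text (optionally signed) or redacted data.
for (const part of result.reasoning) {
  if (part.type === 'text') console.log(part.text);
  else console.log('[redacted reasoning]');
}
```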
2237
2977
 
2238
- interface Output<OUTPUT, PARTIAL> {
2978
+ interface Output$1<OUTPUT, PARTIAL> {
2239
2979
  readonly type: 'object' | 'text';
2240
2980
  injectIntoSystemPrompt(options: {
2241
2981
  system: string | undefined;
@@ -2253,21 +2993,20 @@ interface Output<OUTPUT, PARTIAL> {
2253
2993
  text: string;
2254
2994
  }, context: {
2255
2995
  response: LanguageModelResponseMetadata;
2256
- usage: LanguageModelUsage;
2996
+ usage: LanguageModelUsage$1;
2257
2997
  finishReason: FinishReason;
2258
2998
  }): OUTPUT;
2259
2999
  }
2260
- declare const text: () => Output<string, string>;
3000
+ declare const text: () => Output$1<string, string>;
2261
3001
  declare const object: <OUTPUT>({ schema: inputSchema, }: {
2262
3002
  schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
2263
- }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
3003
+ }) => Output$1<OUTPUT, DeepPartial<OUTPUT>>;
2264
3004
 
2265
- type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
2266
3005
  declare const output_object: typeof object;
2267
3006
  declare const output_text: typeof text;
2268
3007
  declare namespace output {
2269
3008
  export {
2270
- output_Output as Output,
3009
+ Output$1 as Output,
2271
3010
  output_object as object,
2272
3011
  output_text as text,
2273
3012
  };
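Editor's note: the `Output` helpers (`text()`, `object({ schema })`) plug into `experimental_output`. A hedged sketch, assuming the namespace is re-exported from the package as `Output` and that the parsed value is read back from the result's `experimental_output` field (placeholder model):

```ts
import { generateText, Output, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // placeholder provider model

const { experimental_output } = await generateText({
  model,
  prompt: 'Extract the city and country from: "She flew from Lima, Peru."',
  // Structured output parsed from the model response.
  experimental_output: Output.object({
    schema: z.object({ city: z.string(), country: z.string() }),
  }),
});

console.log(experimental_output.city, experimental_output.country);
```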
@@ -2352,13 +3091,13 @@ declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages:
2352
3091
  type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
2353
3092
  system: string | undefined;
2354
3093
  messages: CoreMessage[];
2355
- toolCall: LanguageModelV2FunctionToolCall;
3094
+ toolCall: LanguageModelV2ToolCall;
2356
3095
  tools: TOOLS;
2357
3096
  parameterSchema: (options: {
2358
3097
  toolName: string;
2359
- }) => JSONSchema7;
3098
+ }) => JSONSchema7$1;
2360
3099
  error: NoSuchToolError | InvalidToolArgumentsError;
2361
- }) => Promise<LanguageModelV2FunctionToolCall | null>;
3100
+ }) => Promise<LanguageModelV2ToolCall | null>;
2362
3101
 
2363
3102
  /**
2364
3103
  Callback that is set using the `onStepFinish` option.
@@ -2380,7 +3119,7 @@ This function does not stream the output. If you want to stream the output, use
2380
3119
  @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2381
3120
  @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2382
3121
 
2383
- @param maxTokens - Maximum number of tokens to generate.
3122
+ @param maxOutputTokens - Maximum number of tokens to generate.
2384
3123
  @param temperature - Temperature setting.
2385
3124
  The value is passed through to the provider. The range depends on the provider and model.
2386
3125
  It is recommended to set either `temperature` or `topP`, but not both.
@@ -2413,7 +3152,7 @@ If set and supported by the model, calls will generate deterministic results.
2413
3152
  @returns
2414
3153
  A result object that contains the generated text, the results of the tool calls, and additional information.
2415
3154
  */
2416
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
3155
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
2417
3156
  /**
2418
3157
  The language model to use.
2419
3158
  */
@@ -2455,10 +3194,6 @@ functionality that can be fully encapsulated in the provider.
2455
3194
  */
2456
3195
  providerOptions?: ProviderOptions;
2457
3196
  /**
2458
- @deprecated Use `providerOptions` instead.
2459
- */
2460
- experimental_providerMetadata?: ProviderMetadata;
2461
- /**
2462
3197
  Limits the tools that are available for the model to call without
2463
3198
  changing the tool call and result types in the result.
2464
3199
  */
@@ -2466,7 +3201,7 @@ changing the tool call and result types in the result.
2466
3201
  /**
2467
3202
  Optional specification for parsing structured outputs from the LLM response.
2468
3203
  */
2469
- experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
3204
+ experimental_output?: Output$1<OUTPUT, OUTPUT_PARTIAL>;
2470
3205
  /**
2471
3206
  A function that attempts to repair a tool call that failed to parse.
2472
3207
  */
@@ -2497,8 +3232,8 @@ declare class StreamData {
2497
3232
  private warningTimeout;
2498
3233
  constructor();
2499
3234
  close(): Promise<void>;
2500
- append(value: JSONValue$1): void;
2501
- appendMessageAnnotation(value: JSONValue$1): void;
3235
+ append(value: JSONValue): void;
3236
+ appendMessageAnnotation(value: JSONValue): void;
2502
3237
  }
2503
3238
 
2504
3239
  type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
@@ -2539,6 +3274,9 @@ type DataStreamOptions = {
2539
3274
  */
2540
3275
  experimental_sendStart?: boolean;
2541
3276
  };
3277
+ type ConsumeStreamOptions = {
3278
+ onError?: (error: unknown) => void;
3279
+ };
2542
3280
  /**
2543
3281
  A result object for accessing different stream types and additional information.
2544
3282
  */
@@ -2553,7 +3291,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
2553
3291
 
2554
3292
  Resolved when the response is finished.
2555
3293
  */
2556
- readonly usage: Promise<LanguageModelUsage>;
3294
+ readonly usage: Promise<LanguageModelUsage$1>;
2557
3295
  /**
2558
3296
  Sources that have been used as input to generate the response.
2559
3297
  For multi-step generation, the sources are accumulated from all steps.
@@ -2580,27 +3318,23 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
2580
3318
  */
2581
3319
  readonly providerMetadata: Promise<ProviderMetadata | undefined>;
2582
3320
  /**
2583
- @deprecated Use `providerMetadata` instead.
2584
- */
2585
- readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
2586
- /**
2587
3321
  The full text that has been generated by the last step.
2588
3322
 
2589
3323
  Resolved when the response is finished.
2590
3324
  */
2591
3325
  readonly text: Promise<string>;
2592
3326
  /**
2593
- The reasoning that has been generated by the last step.
3327
+ The full reasoning that the model has generated.
2594
3328
 
2595
3329
  Resolved when the response is finished.
2596
- */
2597
- readonly reasoning: Promise<string | undefined>;
3330
+ */
3331
+ readonly reasoning: Promise<Array<Reasoning>>;
2598
3332
  /**
2599
- The full reasoning that the model has generated.
3333
+ The reasoning that has been generated by the last step.
2600
3334
 
2601
3335
  Resolved when the response is finished.
2602
- */
2603
- readonly reasoningDetails: Promise<Array<ReasoningDetail>>;
3336
+ */
3337
+ readonly reasoningText: Promise<string | undefined>;
2604
3338
  /**
2605
3339
  The tool calls that have been executed in the last step.
2606
3340
 
@@ -2659,8 +3393,10 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
2659
3393
  This is useful to force the stream to finish.
2660
3394
  It effectively removes the backpressure and allows the stream to finish,
2661
3395
  triggering the `onFinish` callback and the promise resolution.
3396
+
3397
+ If an error occurs, it is passed to the optional `onError` callback.
2662
3398
  */
2663
- consumeStream(): Promise<void>;
3399
+ consumeStream(options?: ConsumeStreamOptions): Promise<void>;
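Editor's note: `consumeStream` now accepts an options object with an `onError` callback. A minimal sketch for a fire-and-forget `streamText` call where only the `onFinish` side effects matter (placeholder model):

```ts
import { streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // placeholder provider model

const result = streamText({
  model,
  prompt: 'Write a haiku about backpressure.',
  onFinish: ({ text }) => {
    console.log('finished:', text);
  },
});

// Drain the stream without a consumer so onFinish still fires; errors are
// surfaced through the new onError callback instead of being swallowed.
await result.consumeStream({
  onError: error => {
    console.error('stream error', error);
  },
});
```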
2664
3400
  /**
2665
3401
  Converts the result to a data stream.
2666
3402
 
@@ -2735,23 +3471,26 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
2735
3471
  toTextStreamResponse(init?: ResponseInit): Response;
2736
3472
  }
2737
3473
  type TextStreamPart<TOOLS extends ToolSet> = {
2738
- type: 'text-delta';
2739
- textDelta: string;
3474
+ type: 'text';
3475
+ text: string;
2740
3476
  } | {
2741
3477
  type: 'reasoning';
2742
- textDelta: string;
3478
+ reasoningType: 'text';
3479
+ text: string;
2743
3480
  } | {
2744
- type: 'reasoning-signature';
3481
+ type: 'reasoning';
3482
+ reasoningType: 'signature';
2745
3483
  signature: string;
2746
3484
  } | {
2747
- type: 'redacted-reasoning';
3485
+ type: 'reasoning';
3486
+ reasoningType: 'redacted';
2748
3487
  data: string;
2749
- } | {
2750
- type: 'source';
2751
- source: Source;
2752
3488
  } | ({
3489
+ type: 'source';
3490
+ } & Source) | {
2753
3491
  type: 'file';
2754
- } & GeneratedFile) | ({
3492
+ file: GeneratedFile;
3493
+ } | ({
2755
3494
  type: 'tool-call';
2756
3495
  } & ToolCallUnion<TOOLS>) | {
2757
3496
  type: 'tool-call-streaming-start';
@@ -2776,23 +3515,15 @@ type TextStreamPart<TOOLS extends ToolSet> = {
2776
3515
  request: LanguageModelRequestMetadata;
2777
3516
  warnings: CallWarning[] | undefined;
2778
3517
  response: LanguageModelResponseMetadata;
2779
- usage: LanguageModelUsage;
3518
+ usage: LanguageModelUsage$1;
2780
3519
  finishReason: FinishReason;
2781
3520
  providerMetadata: ProviderMetadata | undefined;
2782
- /**
2783
- * @deprecated Use `providerMetadata` instead.
2784
- */
2785
- experimental_providerMetadata?: ProviderMetadata;
2786
3521
  isContinued: boolean;
2787
3522
  } | {
2788
3523
  type: 'finish';
2789
3524
  finishReason: FinishReason;
2790
- usage: LanguageModelUsage;
3525
+ usage: LanguageModelUsage$1;
2791
3526
  providerMetadata: ProviderMetadata | undefined;
2792
- /**
2793
- * @deprecated Use `providerMetadata` instead.
2794
- */
2795
- experimental_providerMetadata?: ProviderMetadata;
2796
3527
  /**
2797
3528
  * @deprecated will be moved into provider metadata
2798
3529
  */
@@ -2866,7 +3597,7 @@ Callback that is set using the `onChunk` option.
2866
3597
  */
2867
3598
  type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2868
3599
  chunk: Extract<TextStreamPart<TOOLS>, {
2869
- type: 'text-delta' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
3600
+ type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
2870
3601
  }>;
2871
3602
  }) => Promise<void> | void;
2872
3603
  /**
@@ -2892,7 +3623,7 @@ This function streams the output. If you do not want to stream the output, use `
2892
3623
  @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2893
3624
  @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2894
3625
 
2895
- @param maxTokens - Maximum number of tokens to generate.
3626
+ @param maxOutputTokens - Maximum number of tokens to generate.
2896
3627
  @param temperature - Temperature setting.
2897
3628
  The value is passed through to the provider. The range depends on the provider and model.
2898
3629
  It is recommended to set either `temperature` or `topP`, but not both.
@@ -2929,7 +3660,7 @@ If set and supported by the model, calls will generate deterministic results.
2929
3660
  @return
2930
3661
  A result object for accessing different stream types and additional information.
2931
3662
  */
2932
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3663
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
2933
3664
  /**
2934
3665
  The language model to use.
2935
3666
  */
@@ -2971,10 +3702,6 @@ functionality that can be fully encapsulated in the provider.
2971
3702
  */
2972
3703
  providerOptions?: ProviderOptions;
2973
3704
  /**
2974
- @deprecated Use `providerOptions` instead.
2975
- */
2976
- experimental_providerMetadata?: ProviderMetadata;
2977
- /**
2978
3705
  Limits the tools that are available for the model to call without
2979
3706
  changing the tool call and result types in the result.
2980
3707
  */
@@ -2982,7 +3709,7 @@ changing the tool call and result types in the result.
2982
3709
  /**
2983
3710
  Optional specification for parsing structured outputs from the LLM response.
2984
3711
  */
2985
- experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
3712
+ experimental_output?: Output$1<OUTPUT, PARTIAL_OUTPUT>;
2986
3713
  /**
2987
3714
  A function that attempts to repair a tool call that failed to parse.
2988
3715
  */
@@ -3112,7 +3839,7 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
3112
3839
  }
3113
3840
  ```
3114
3841
  */
3115
- providerOptions?: Record<string, Record<string, JSONValue>>;
3842
+ providerOptions?: Record<string, Record<string, JSONValue$1>>;
3116
3843
  /**
3117
3844
  Maximum number of retries per embedding model call. Set to 0 to disable retries.
3118
3845
 
@@ -3145,7 +3872,7 @@ interface GenerateObjectResult<OBJECT> {
3145
3872
  /**
3146
3873
  The token usage of the generated text.
3147
3874
  */
3148
- readonly usage: LanguageModelUsage;
3875
+ readonly usage: LanguageModelUsage$1;
3149
3876
  /**
3150
3877
  Warnings from the model provider (e.g. unsupported settings).
3151
3878
  */
@@ -3177,186 +3904,54 @@ interface GenerateObjectResult<OBJECT> {
3177
3904
  */
3178
3905
  readonly providerMetadata: ProviderMetadata | undefined;
3179
3906
  /**
3180
- @deprecated Use `providerMetadata` instead.
3181
- */
3182
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
3183
- /**
3184
- Converts the object to a JSON response.
3185
- The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
3186
- */
3187
- toJsonResponse(init?: ResponseInit): Response;
3188
- }
3189
-
3190
- /**
3191
- A function that attempts to repair the raw output of the mode
3192
- to enable JSON parsing.
3193
-
3194
- Should return the repaired text or null if the text cannot be repaired.
3195
- */
3196
- type RepairTextFunction = (options: {
3197
- text: string;
3198
- error: JSONParseError | TypeValidationError;
3199
- }) => Promise<string | null>;
3200
- /**
3201
- Generate a structured, typed object for a given prompt and schema using a language model.
3202
-
3203
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3204
-
3205
- @returns
3206
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
3207
- */
3208
- declare function generateObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3209
- output?: 'object' | undefined;
3210
- /**
3211
- The language model to use.
3212
- */
3213
- model: LanguageModel;
3214
- /**
3215
- The schema of the object that the model should generate.
3216
- */
3217
- schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
3218
- /**
3219
- Optional name of the output that should be generated.
3220
- Used by some providers for additional LLM guidance, e.g.
3221
- via tool or schema name.
3222
- */
3223
- schemaName?: string;
3224
- /**
3225
- Optional description of the output that should be generated.
3226
- Used by some providers for additional LLM guidance, e.g.
3227
- via tool or schema description.
3228
- */
3229
- schemaDescription?: string;
3230
- /**
3231
- The mode to use for object generation.
3232
-
3233
- The schema is converted into a JSON schema and used in one of the following ways
3234
-
3235
- - 'auto': The provider will choose the best mode for the model.
3236
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3237
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3238
-
3239
- Please note that most providers do not support all modes.
3240
-
3241
- Default and recommended: 'auto' (best mode for the model).
3242
- */
3243
- mode?: 'auto' | 'json' | 'tool';
3244
- /**
3245
- A function that attempts to repair the raw output of the mode
3246
- to enable JSON parsing.
3247
- */
3248
- experimental_repairText?: RepairTextFunction;
3249
- /**
3250
- Optional telemetry configuration (experimental).
3251
- */
3252
- experimental_telemetry?: TelemetrySettings;
3253
- /**
3254
- Additional provider-specific options. They are passed through
3255
- to the provider from the AI SDK and enable provider-specific
3256
- functionality that can be fully encapsulated in the provider.
3257
- */
3258
- providerOptions?: ProviderOptions;
3259
- /**
3260
- @deprecated Use `providerOptions` instead.
3261
- */
3262
- experimental_providerMetadata?: ProviderMetadata;
3263
- /**
3264
- * Internal. For test use only. May change without notice.
3265
- */
3266
- _internal?: {
3267
- generateId?: () => string;
3268
- currentDate?: () => Date;
3269
- };
3270
- }): Promise<GenerateObjectResult<OBJECT>>;
3271
- /**
3272
- Generate an array with structured, typed elements for a given prompt and element schema using a language model.
3273
-
3274
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3275
-
3276
- @return
3277
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
3278
- */
3279
- declare function generateObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3280
- output: 'array';
3281
- /**
3282
- The language model to use.
3283
- */
3284
- model: LanguageModel;
3285
- /**
3286
- The element schema of the array that the model should generate.
3287
- */
3288
- schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
3289
- /**
3290
- Optional name of the array that should be generated.
3291
- Used by some providers for additional LLM guidance, e.g.
3292
- via tool or schema name.
3293
- */
3294
- schemaName?: string;
3295
- /**
3296
- Optional description of the array that should be generated.
3297
- Used by some providers for additional LLM guidance, e.g.
3298
- via tool or schema description.
3299
- */
3300
- schemaDescription?: string;
3301
- /**
3302
- The mode to use for object generation.
3303
-
3304
- The schema is converted into a JSON schema and used in one of the following ways
3305
-
3306
- - 'auto': The provider will choose the best mode for the model.
3307
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3308
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3309
-
3310
- Please note that most providers do not support all modes.
3311
-
3312
- Default and recommended: 'auto' (best mode for the model).
3313
- */
3314
- mode?: 'auto' | 'json' | 'tool';
3315
- /**
3316
- A function that attempts to repair the raw output of the mode
3317
- to enable JSON parsing.
3318
- */
3319
- experimental_repairText?: RepairTextFunction;
3320
- /**
3321
- Optional telemetry configuration (experimental).
3322
- */
3323
- experimental_telemetry?: TelemetrySettings;
3324
- /**
3325
- Additional provider-specific options. They are passed through
3326
- to the provider from the AI SDK and enable provider-specific
3327
- functionality that can be fully encapsulated in the provider.
3328
- */
3329
- providerOptions?: ProviderOptions;
3330
- /**
3331
- @deprecated Use `providerOptions` instead.
3332
- */
3333
- experimental_providerMetadata?: ProviderMetadata;
3334
- /**
3335
- * Internal. For test use only. May change without notice.
3907
+ Converts the object to a JSON response.
3908
+ The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
3909
+ */
3910
+ toJsonResponse(init?: ResponseInit): Response;
3911
+ }
3912
+
3913
+ /**
3914
+ A function that attempts to repair the raw output of the model
3915
+ to enable JSON parsing.
3916
+
3917
+ Should return the repaired text or null if the text cannot be repaired.
3336
3918
  */
3337
- _internal?: {
3338
- generateId?: () => string;
3339
- currentDate?: () => Date;
3340
- };
3341
- }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
3919
+ type RepairTextFunction = (options: {
3920
+ text: string;
3921
+ error: JSONParseError | TypeValidationError;
3922
+ }) => Promise<string | null>;
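Editor's note: a `RepairTextFunction` receives the raw model text plus the parse or validation error and returns repaired text or `null`. A minimal sketch that trims prose and code-fence noise around the outermost JSON object; it would be passed as `experimental_repairText`:

```ts
// Hedged sketch: keeps only the outermost JSON object before parsing is
// retried; anything else is left unrepaired by returning null.
const repairJsonText = async ({ text }: { text: string; error: unknown }) => {
  const start = text.indexOf('{');
  const end = text.lastIndexOf('}');
  return start >= 0 && end > start ? text.slice(start, end + 1) : null;
};
```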
3342
3923
  /**
3343
- Generate a value from an enum (limited list of string values) using a language model.
3924
+ Generate a structured, typed object for a given prompt and schema using a language model.
3344
3925
 
3345
- This function does not stream the output.
3926
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3346
3927
 
3347
- @return
3348
- A result object that contains the generated value, the finish reason, the token usage, and additional information.
3928
+ @returns
3929
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
3349
3930
  */
3350
- declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3931
+ declare function generateObject<TYPE extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = TYPE extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
3932
+ /**
3933
+ The enum values that the model should use.
3934
+ */
3935
+ enum: Array<TYPE>;
3936
+ mode?: 'json';
3351
3937
  output: 'enum';
3938
+ } : Output extends 'no-schema' ? {} : {
3352
3939
  /**
3353
- The language model to use.
3354
- */
3355
- model: LanguageModel;
3940
+ The schema of the object that the model should generate.
3941
+ */
3942
+ schema: SCHEMA;
3356
3943
  /**
3357
- The enum values that the model should use.
3358
- */
3359
- enum: Array<ENUM>;
3944
+ Optional name of the output that should be generated.
3945
+ Used by some providers for additional LLM guidance, e.g.
3946
+ via tool or schema name.
3947
+ */
3948
+ schemaName?: string;
3949
+ /**
3950
+ Optional description of the output that should be generated.
3951
+ Used by some providers for additional LLM guidance, e.g.
3952
+ via tool or schema description.
3953
+ */
3954
+ schemaDescription?: string;
3360
3955
  /**
3361
3956
  The mode to use for object generation.
3362
3957
 
@@ -3369,54 +3964,15 @@ The schema is converted into a JSON schema and used in one of the following ways
3369
3964
  Please note that most providers do not support all modes.
3370
3965
 
3371
3966
  Default and recommended: 'auto' (best mode for the model).
3372
- */
3967
+ */
3373
3968
  mode?: 'auto' | 'json' | 'tool';
3374
- /**
3375
- A function that attempts to repair the raw output of the mode
3376
- to enable JSON parsing.
3377
- */
3378
- experimental_repairText?: RepairTextFunction;
3379
- /**
3380
- Optional telemetry configuration (experimental).
3381
- */
3382
- experimental_telemetry?: TelemetrySettings;
3383
- /**
3384
- Additional provider-specific options. They are passed through
3385
- to the provider from the AI SDK and enable provider-specific
3386
- functionality that can be fully encapsulated in the provider.
3387
- */
3388
- providerOptions?: ProviderOptions;
3389
- /**
3390
- @deprecated Use `providerOptions` instead.
3391
- */
3392
- experimental_providerMetadata?: ProviderMetadata;
3393
- /**
3394
- * Internal. For test use only. May change without notice.
3395
- */
3396
- _internal?: {
3397
- generateId?: () => string;
3398
- currentDate?: () => Date;
3399
- };
3400
- }): Promise<GenerateObjectResult<ENUM>>;
3401
- /**
3402
- Generate JSON with any schema for a given prompt using a language model.
3403
-
3404
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3405
-
3406
- @returns
3407
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
3408
- */
3409
- declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3410
- output: 'no-schema';
3969
+ }) & {
3970
+ output?: Output;
3411
3971
  /**
3412
3972
  The language model to use.
3413
3973
  */
3414
3974
  model: LanguageModel;
3415
3975
  /**
3416
- The mode to use for object generation. Must be "json" for no-schema output.
3417
- */
3418
- mode?: 'json';
3419
- /**
3420
3976
  A function that attempts to repair the raw output of the model
3421
3977
  to enable JSON parsing.
3422
3978
  */
@@ -3432,17 +3988,13 @@ functionality that can be fully encapsulated in the provider.
3432
3988
  */
3433
3989
  providerOptions?: ProviderOptions;
3434
3990
  /**
3435
- @deprecated Use `providerOptions` instead.
3436
- */
3437
- experimental_providerMetadata?: ProviderMetadata;
3438
- /**
3439
3991
  * Internal. For test use only. May change without notice.
3440
3992
  */
3441
3993
  _internal?: {
3442
3994
  generateId?: () => string;
3443
3995
  currentDate?: () => Date;
3444
3996
  };
3445
- }): Promise<GenerateObjectResult<JSONValue>>;
3997
+ }): Promise<GenerateObjectResult<TYPE>>;
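Editor's note: the separate `generateObject` overloads collapse into one generic signature where `output` selects between `'object'` (the default), `'array'`, `'enum'`, and `'no-schema'`. Two hedged call sketches under that shape (placeholder model):

```ts
import { generateObject, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // placeholder provider model

// Default output: a single object matching the schema.
const recipe = await generateObject({
  model,
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
  prompt: 'Invent a simple lentil soup recipe.',
});
console.log(recipe.object.name);

// Enum output: the schema is replaced by a closed list of string values.
const sentiment = await generateObject({
  model,
  output: 'enum',
  enum: ['positive', 'neutral', 'negative'],
  prompt: 'Classify: "The update fixed every bug I reported."',
});
console.log(sentiment.object); // 'positive' | 'neutral' | 'negative'
```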
3446
3998
 
3447
3999
  /**
3448
4000
  The result of a `streamObject` call that contains the partial object stream and additional information.
@@ -3455,7 +4007,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
3455
4007
  /**
3456
4008
  The token usage of the generated response. Resolved when the response is finished.
3457
4009
  */
3458
- readonly usage: Promise<LanguageModelUsage>;
4010
+ readonly usage: Promise<LanguageModelUsage$1>;
3459
4011
  /**
3460
4012
  Additional provider-specific metadata. They are passed through
3461
4013
  from the provider to the AI SDK and enable provider-specific
@@ -3463,10 +4015,6 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
3463
4015
  */
3464
4016
  readonly providerMetadata: Promise<ProviderMetadata | undefined>;
3465
4017
  /**
3466
- @deprecated Use `providerMetadata` instead.
3467
- */
3468
- readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
3469
- /**
3470
4018
  Additional request information from the last step.
3471
4019
  */
3472
4020
  readonly request: Promise<LanguageModelRequestMetadata>;
@@ -3531,7 +4079,7 @@ type ObjectStreamPart<PARTIAL> = {
3531
4079
  type: 'finish';
3532
4080
  finishReason: FinishReason;
3533
4081
  logprobs?: LogProbs;
3534
- usage: LanguageModelUsage;
4082
+ usage: LanguageModelUsage$1;
3535
4083
  response: LanguageModelResponseMetadata;
3536
4084
  providerMetadata?: ProviderMetadata;
3537
4085
  };
@@ -3553,7 +4101,7 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
3553
4101
  /**
3554
4102
  The token usage of the generated response.
3555
4103
  */
3556
- usage: LanguageModelUsage;
4104
+ usage: LanguageModelUsage$1;
3557
4105
  /**
3558
4106
  The generated object. Can be undefined if the final object does not match the schema.
3559
4107
  */
@@ -3576,10 +4124,6 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
3576
4124
  functionality that can be fully encapsulated in the provider.
3577
4125
  */
3578
4126
  providerMetadata: ProviderMetadata | undefined;
3579
- /**
3580
- @deprecated Use `providerMetadata` instead.
3581
- */
3582
- experimental_providerMetadata?: ProviderMetadata;
3583
4127
  }) => Promise<void> | void;
3584
4128
  /**
3585
4129
  Generate a structured, typed object for a given prompt and schema using a language model.
@@ -3636,10 +4180,6 @@ functionality that can be fully encapsulated in the provider.
3636
4180
  */
3637
4181
  providerOptions?: ProviderOptions;
3638
4182
  /**
3639
- @deprecated Use `providerOptions` instead.
3640
- */
3641
- experimental_providerMetadata?: ProviderMetadata;
3642
- /**
3643
4183
  Callback that is invoked when an error occurs during streaming.
3644
4184
  You can use it to log errors.
3645
4185
  The stream processing will pause until the callback promise is resolved.
@@ -3713,10 +4253,6 @@ functionality that can be fully encapsulated in the provider.
3713
4253
  */
3714
4254
  providerOptions?: ProviderOptions;
3715
4255
  /**
3716
- @deprecated Use `providerOptions` instead.
3717
- */
3718
- experimental_providerMetadata?: ProviderMetadata;
3719
- /**
3720
4256
  Callback that is invoked when an error occurs during streaming.
3721
4257
  You can use it to log errors.
3722
4258
  The stream processing will pause until the callback promise is resolved.
@@ -3764,10 +4300,6 @@ functionality that can be fully encapsulated in the provider.
3764
4300
  */
3765
4301
  providerOptions?: ProviderOptions;
3766
4302
  /**
3767
- @deprecated Use `providerOptions` instead.
3768
- */
3769
- experimental_providerMetadata?: ProviderMetadata;
3770
- /**
3771
4303
  Callback that is invoked when an error occurs during streaming.
3772
4304
  You can use it to log errors.
3773
4305
  The stream processing will pause until the callback promise is resolved.
@@ -3776,7 +4308,7 @@ The stream processing will pause until the callback promise is resolved.
3776
4308
  /**
3777
4309
  Callback that is called when the LLM response and the final object validation are finished.
3778
4310
  */
3779
- onFinish?: StreamObjectOnFinishCallback<JSONValue>;
4311
+ onFinish?: StreamObjectOnFinishCallback<JSONValue$1>;
3780
4312
  /**
3781
4313
  * Internal. For test use only. May change without notice.
3782
4314
  */
@@ -3785,14 +4317,224 @@ Callback that is called when the LLM response and the final object validation ar
3785
4317
  currentDate?: () => Date;
3786
4318
  now?: () => number;
3787
4319
  };
3788
- }): StreamObjectResult<JSONValue, JSONValue, never>;
4320
+ }): StreamObjectResult<JSONValue$1, JSONValue$1, never>;
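Editor's note: a hedged `streamObject` sketch under the updated types, streaming partial objects against a schema; `partialObjectStream` is assumed from the wider `StreamObjectResult` interface and the model is a placeholder:

```ts
import { streamObject, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // placeholder provider model

const result = streamObject({
  model,
  schema: z.object({ title: z.string(), bulletPoints: z.array(z.string()) }),
  prompt: 'Summarize the benefits of static typing as a short outline.',
  onFinish: ({ object, usage }) => {
    // `object` is undefined if the final value failed schema validation.
    console.log('tokens:', usage.totalTokens, 'valid:', object !== undefined);
  },
});

for await (const partial of result.partialObjectStream) {
  console.log(partial); // deeply partial object, grows as the stream progresses
}
```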
4321
+
4322
+ /**
4323
+ * A generated audio file.
4324
+ */
4325
+ interface GeneratedAudioFile extends GeneratedFile {
4326
+ /**
4327
+ * Audio format of the file (e.g., 'mp3', 'wav', etc.)
4328
+ */
4329
+ readonly format: string;
4330
+ }
4331
+
4332
+ /**
4333
+ The result of a `generateSpeech` call.
4334
+ It contains the audio data and additional information.
4335
+ */
4336
+ interface SpeechResult {
4337
+ /**
4338
+ * The audio data as a base64 encoded string or binary data.
4339
+ */
4340
+ readonly audio: GeneratedAudioFile;
4341
+ /**
4342
+ Warnings for the call, e.g. unsupported settings.
4343
+ */
4344
+ readonly warnings: Array<SpeechWarning>;
4345
+ /**
4346
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4347
+ */
4348
+ readonly responses: Array<SpeechModelResponseMetadata>;
4349
+ /**
4350
+ Provider metadata from the provider.
4351
+ */
4352
+ readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4353
+ }
4354
+
4355
+ /**
4356
+ Generates speech audio using a speech model.
4357
+
4358
+ @param model - The speech model to use.
4359
+ @param text - The text to convert to speech.
4360
+ @param voice - The voice to use for speech generation.
4361
+ @param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
4362
+ @param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
4363
+ @param speed - The speed of the speech generation.
4364
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
4365
+ as body parameters.
4366
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4367
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4368
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4369
+
4370
+ @returns A result object that contains the generated audio data.
4371
+ */
4372
+ declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4373
+ /**
4374
+ The speech model to use.
4375
+ */
4376
+ model: SpeechModelV1;
4377
+ /**
4378
+ The text to convert to speech.
4379
+ */
4380
+ text: string;
4381
+ /**
4382
+ The voice to use for speech generation.
4383
+ */
4384
+ voice?: string;
4385
+ /**
4386
+ * The desired output format for the audio e.g. "mp3", "wav", etc.
4387
+ */
4388
+ outputFormat?: 'mp3' | 'wav' | (string & {});
4389
+ /**
4390
+ Instructions for the speech generation e.g. "Speak in a slow and steady tone".
4391
+ */
4392
+ instructions?: string;
4393
+ /**
4394
+ The speed of the speech generation.
4395
+ */
4396
+ speed?: number;
4397
+ /**
4398
+ Additional provider-specific options that are passed through to the provider
4399
+ as body parameters.
4400
+
4401
+ The outer record is keyed by the provider name, and the inner
4402
+ record is keyed by the provider-specific metadata key.
4403
+ ```ts
4404
+ {
4405
+ "openai": {}
4406
+ }
4407
+ ```
4408
+ */
4409
+ providerOptions?: ProviderOptions;
4410
+ /**
4411
+ Maximum number of retries per speech model call. Set to 0 to disable retries.
4412
+
4413
+ @default 2
4414
+ */
4415
+ maxRetries?: number;
4416
+ /**
4417
+ Abort signal.
4418
+ */
4419
+ abortSignal?: AbortSignal;
4420
+ /**
4421
+ Additional headers to include in the request.
4422
+ Only applicable for HTTP-based providers.
4423
+ */
4424
+ headers?: Record<string, string>;
4425
+ }): Promise<SpeechResult>;
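Editor's note: a hedged sketch of the new speech generation call. The provider import, the model id, and the exact export name (`generateSpeech` per this declaration; it may surface with an `experimental_` prefix) are assumptions:

```ts
import { generateSpeech } from 'ai'; // export name assumed from this declaration
import { openai } from '@ai-sdk/openai'; // assumed provider with a speech model
import { writeFile } from 'node:fs/promises';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'), // hypothetical model id
  text: 'The build finished without errors.',
  voice: 'alloy', // provider-specific voice name
  outputFormat: 'mp3',
});

if (warnings.length > 0) console.warn(warnings);
// GeneratedAudioFile extends GeneratedFile, so binary data and format are available.
await writeFile(`speech.${audio.format}`, audio.uint8Array);
```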
4426
+
4427
+ /**
4428
+ The result of a `transcribe` call.
4429
+ It contains the transcript and additional information.
4430
+ */
4431
+ interface TranscriptionResult {
4432
+ /**
4433
+ * The complete transcribed text from the audio.
4434
+ */
4435
+ readonly text: string;
4436
+ /**
4437
+ * Array of transcript segments with timing information.
4438
+ * Each segment represents a portion of the transcribed text with start and end times.
4439
+ */
4440
+ readonly segments: Array<{
4441
+ /**
4442
+ * The text content of this segment.
4443
+ */
4444
+ readonly text: string;
4445
+ /**
4446
+ * The start time of this segment in seconds.
4447
+ */
4448
+ readonly startSecond: number;
4449
+ /**
4450
+ * The end time of this segment in seconds.
4451
+ */
4452
+ readonly endSecond: number;
4453
+ }>;
4454
+ /**
4455
+ * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
4456
+ * May be undefined if the language couldn't be detected.
4457
+ */
4458
+ readonly language: string | undefined;
4459
+ /**
4460
+ * The total duration of the audio file in seconds.
4461
+ * May be undefined if the duration couldn't be determined.
4462
+ */
4463
+ readonly durationInSeconds: number | undefined;
4464
+ /**
4465
+ Warnings for the call, e.g. unsupported settings.
4466
+ */
4467
+ readonly warnings: Array<TranscriptionWarning>;
4468
+ /**
4469
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4470
+ */
4471
+ readonly responses: Array<TranscriptionModelResponseMetadata>;
4472
+ /**
4473
+ Provider metadata from the provider.
4474
+ */
4475
+ readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4476
+ }
4477
+
4478
+ /**
4479
+ Generates transcripts using a transcription model.
4480
+
4481
+ @param model - The transcription model to use.
4482
+ @param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
4483
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
4484
+ as body parameters.
4485
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4486
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4487
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4488
+
4489
+ @returns A result object that contains the generated transcript.
4490
+ */
4491
+ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4492
+ /**
4493
+ The transcription model to use.
4494
+ */
4495
+ model: TranscriptionModelV1;
4496
+ /**
4497
+ The audio data to transcribe.
4498
+ */
4499
+ audio: DataContent | URL;
4500
+ /**
4501
+ Additional provider-specific options that are passed through to the provider
4502
+ as body parameters.
4503
+
4504
+ The outer record is keyed by the provider name, and the inner
4505
+ record is keyed by the provider-specific metadata key.
4506
+ ```ts
4507
+ {
4508
+ "openai": {
4509
+ "temperature": 0
4510
+ }
4511
+ }
4512
+ ```
4513
+ */
4514
+ providerOptions?: ProviderOptions;
4515
+ /**
4516
+ Maximum number of retries per transcript model call. Set to 0 to disable retries.
4517
+
4518
+ @default 2
4519
+ */
4520
+ maxRetries?: number;
4521
+ /**
4522
+ Abort signal.
4523
+ */
4524
+ abortSignal?: AbortSignal;
4525
+ /**
4526
+ Additional headers to include in the request.
4527
+ Only applicable for HTTP-based providers.
4528
+ */
4529
+ headers?: Record<string, string>;
4530
+ }): Promise<TranscriptionResult>;
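Editor's note: a hedged sketch of the transcription call. The provider import, the model id, and the export name (`transcribe` per this declaration; it may surface as `experimental_transcribe`) are assumptions:

```ts
import { transcribe } from 'ai'; // export name assumed from this declaration
import { openai } from '@ai-sdk/openai'; // assumed provider with a transcription model
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'), // hypothetical model id
  audio: await readFile('meeting.mp3'), // DataContent (Buffer) or a URL
});

console.log(result.text);
console.log(result.language, result.durationInSeconds);
for (const segment of result.segments) {
  console.log(`${segment.startSecond}s to ${segment.endSecond}s: ${segment.text}`);
}
```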
3789
4531
 
3790
4532
  /**
3791
4533
  * Applies default settings for a language model.
3792
4534
  */
3793
4535
  declare function defaultSettingsMiddleware({ settings, }: {
3794
4536
  settings: Partial<LanguageModelV2CallOptions & {
3795
- providerMetadata?: LanguageModelV2ProviderMetadata;
4537
+ providerOptions?: SharedV2ProviderOptions;
3796
4538
  }>;
3797
4539
  }): LanguageModelV2Middleware;
3798
4540
 
@@ -3833,15 +4575,6 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
3833
4575
  modelId?: string;
3834
4576
  providerId?: string;
3835
4577
  }) => LanguageModelV2;
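Editor's note: `defaultSettingsMiddleware` now takes `providerOptions` rather than `providerMetadata`, and the deprecated `experimental_wrapLanguageModel` alias is removed below. A sketch combining the two remaining names; the base model is a placeholder and the provider-options key is hypothetical:

```ts
import { wrapLanguageModel, defaultSettingsMiddleware } from 'ai';
import type { LanguageModelV2 } from '@ai-sdk/provider';

declare const baseModel: LanguageModelV2; // placeholder provider model

const modelWithDefaults = wrapLanguageModel({
  model: baseModel,
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.2,
      // Renamed field: provider-specific defaults live under providerOptions.
      providerOptions: {
        'example-provider': { cacheControl: true }, // hypothetical key/value
      },
    },
  }),
});
```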
3836
- /**
3837
- * @deprecated Use `wrapLanguageModel` instead.
3838
- */
3839
- declare const experimental_wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
3840
- model: LanguageModelV2;
3841
- middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
3842
- modelId?: string;
3843
- providerId?: string;
3844
- }) => LanguageModelV2;
3845
4578
 
3846
4579
  /**
3847
4580
  * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
@@ -3912,21 +4645,13 @@ declare const experimental_createProviderRegistry: typeof createProviderRegistry
3912
4645
  *
3913
4646
  * @param vector1 - The first vector.
3914
4647
  * @param vector2 - The second vector.
3915
- * @param options - Optional configuration.
3916
- * @param options.throwErrorForEmptyVectors - If true, throws an error for empty vectors. Default: false.
3917
4648
  *
3918
4649
  * @returns The cosine similarity between vector1 and vector2.
3919
4650
  * @returns 0 if either vector is the zero vector.
3920
4651
  *
3921
- * @throws {InvalidArgumentError} If throwErrorForEmptyVectors is true and vectors are empty.
3922
4652
  * @throws {InvalidArgumentError} If the vectors do not have the same length.
3923
4653
  */
3924
- declare function cosineSimilarity(vector1: number[], vector2: number[], options?: {
3925
- /**
3926
- * @deprecated will be removed in 5.0
3927
- */
3928
- throwErrorForEmptyVectors?: boolean;
3929
- }): number;
4654
+ declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
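Editor's note: `cosineSimilarity` drops the deprecated `throwErrorForEmptyVectors` option and simply returns 0 for zero vectors. A minimal sketch comparing two embeddings (the embedding model is a placeholder):

```ts
import { cosineSimilarity, embedMany, type EmbeddingModel } from 'ai';

declare const embeddingModel: EmbeddingModel<string>; // placeholder provider model

const { embeddings } = await embedMany({
  model: embeddingModel,
  values: ['sunny beach vacation', 'rainy mountain hike'],
});

// The options argument is gone; zero vectors yield 0 instead of optionally throwing.
const similarity = cosineSimilarity(embeddings[0], embeddings[1]);
console.log(similarity); // number in [-1, 1]
```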
3930
4655
 
3931
4656
  /**
3932
4657
  * Creates a ReadableStream that emits the provided values with an optional delay between each value.
@@ -3960,23 +4685,29 @@ declare class InvalidArgumentError extends AISDKError {
3960
4685
  }
3961
4686
 
  type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
- type: 'text-delta';
- textDelta: string;
+ type: 'stream-start';
+ warnings: LanguageModelV2CallWarning[];
+ } | {
+ type: 'text';
+ text: string;
  } | {
  type: 'reasoning';
- textDelta: string;
+ reasoningType: 'text';
+ text: string;
  } | {
- type: 'reasoning-signature';
+ type: 'reasoning';
+ reasoningType: 'signature';
  signature: string;
  } | {
- type: 'redacted-reasoning';
+ type: 'reasoning';
+ reasoningType: 'redacted';
  data: string;
- } | ({
+ } | {
  type: 'file';
- } & GeneratedFile) | {
- type: 'source';
- source: Source;
+ file: GeneratedFile;
  } | ({
+ type: 'source';
+ } & Source) | ({
  type: 'tool-call';
  } & ToolCallUnion<TOOLS>) | {
  type: 'tool-call-streaming-start';
@@ -3998,8 +4729,8 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
  type: 'finish';
  finishReason: FinishReason;
  logprobs?: LogProbs;
- usage: LanguageModelUsage;
- experimental_providerMetadata?: ProviderMetadata;
+ usage: LanguageModelUsage$1;
+ providerMetadata?: ProviderMetadata;
  } | {
  type: 'error';
  error: unknown;
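To illustrate the reshaped parts above (`text-delta`/`textDelta` becoming `text`/`text`, the three reasoning variants collapsing into one `reasoning` type discriminated by `reasoningType`, and `experimental_providerMetadata` dropping its prefix), a self-contained sketch over a simplified local copy of the union rather than the exported type:

```ts
// Simplified stand-in for the union above; not the exported type.
type StreamPart =
  | { type: 'stream-start'; warnings: unknown[] }
  | { type: 'text'; text: string }
  | { type: 'reasoning'; reasoningType: 'text'; text: string }
  | { type: 'reasoning'; reasoningType: 'signature'; signature: string }
  | { type: 'reasoning'; reasoningType: 'redacted'; data: string }
  | { type: 'finish'; finishReason: string; usage: unknown; providerMetadata?: unknown };

function handlePart(part: StreamPart) {
  switch (part.type) {
    case 'text':
      process.stdout.write(part.text); // was `textDelta` on 'text-delta' parts
      break;
    case 'reasoning':
      if (part.reasoningType === 'text') process.stdout.write(part.text);
      break;
    case 'finish':
      // was `experimental_providerMetadata`
      console.log(part.finishReason, part.usage, part.providerMetadata);
      break;
  }
}
```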
@@ -4062,7 +4793,7 @@ declare class NoObjectGeneratedError extends AISDKError {
  /**
  The usage of the model.
  */
- readonly usage: LanguageModelUsage | undefined;
+ readonly usage: LanguageModelUsage$1 | undefined;
  /**
  Reason why the model finished generating a response.
  */
@@ -4072,7 +4803,7 @@ declare class NoObjectGeneratedError extends AISDKError {
  cause?: Error;
  text?: string;
  response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
  finishReason: FinishReason;
  });
  static isInstance(error: unknown): error is NoObjectGeneratedError;
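The renamed `LanguageModelUsage$1` alias surfaces on `NoObjectGeneratedError` as shown above. A sketch of catching it around `generateObject`; the model id, schema, and prompt are assumptions:

```ts
import { generateObject, NoObjectGeneratedError } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

try {
  const { object } = await generateObject({
    model: openai('gpt-4o'),
    schema: z.object({ name: z.string() }),
    prompt: 'Extract the customer name from: "Order #1234 for Ada Lovelace".',
  });
  console.log(object.name);
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    // `usage` has the LanguageModelUsage shape re-exported at the bottom of this file.
    console.error(error.finishReason, error.usage, error.text);
  }
}
```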
@@ -4106,11 +4837,11 @@ declare const symbol$6: unique symbol;
  declare class ToolExecutionError extends AISDKError {
  private readonly [symbol$6];
  readonly toolName: string;
- readonly toolArgs: JSONValue;
+ readonly toolArgs: JSONValue$1 | unknown;
  readonly toolCallId: string;
  constructor({ toolArgs, toolName, toolCallId, cause, message, }: {
  message?: string;
- toolArgs: JSONValue;
+ toolArgs: JSONValue$1 | unknown;
  toolName: string;
  toolCallId: string;
  cause: unknown;
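With `toolArgs` widened to `JSONValue$1 | unknown`, handlers should treat the arguments defensively. A sketch; the tool definition, model id, and the `parameters` field name follow current AI SDK usage and are assumptions here:

```ts
import { generateText, tool, ToolExecutionError } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

try {
  await generateText({
    model: openai('gpt-4o'),
    tools: {
      weather: tool({
        description: 'Get the weather for a city',
        parameters: z.object({ city: z.string() }),
        execute: async () => {
          throw new Error('weather service unavailable');
        },
      }),
    },
    prompt: 'What is the weather in Berlin?',
  });
} catch (error) {
  if (ToolExecutionError.isInstance(error)) {
    // `toolArgs` is now `JSONValue$1 | unknown`, so inspect it before using it.
    console.error(error.toolName, error.toolCallId, error.toolArgs, error.cause);
  }
}
```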
@@ -4249,7 +4980,7 @@ The following streams are supported:
  - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
  - `string` streams (LangChain `StringOutputParser` output)
  */
- declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
+ declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array<ArrayBufferLike>>;
  declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
  init?: ResponseInit;
  data?: StreamData;
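A sketch of feeding LangChain `model.stream(...)` output (one of the supported stream shapes listed above) into the adapter; the LangChain package, model id, and route-handler shape are assumptions:

```ts
import { LangChainAdapter } from 'ai';
import { ChatOpenAI } from '@langchain/openai';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  // `model.stream` yields LangChainAIMessageChunk values, which the adapter accepts.
  const model = new ChatOpenAI({ model: 'gpt-4o-mini' });
  const stream = await model.stream(prompt);

  return LangChainAdapter.toDataStreamResponse(stream);
}
```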
@@ -4271,7 +5002,7 @@ declare namespace langchainAdapter {
  type EngineResponse = {
  delta: string;
  };
- declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
+ declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array<ArrayBufferLike>>;
  declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
  init?: ResponseInit;
  data?: StreamData;
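Similarly for LlamaIndex, whose chat engines stream `AsyncIterable<EngineResponse>` as typed above; the llamaindex API surface used here is an assumption, not part of this diff:

```ts
import { LlamaIndexAdapter } from 'ai';
import { OpenAI, SimpleChatEngine } from 'llamaindex';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const chatEngine = new SimpleChatEngine({
    llm: new OpenAI({ model: 'gpt-4o-mini' }),
  });
  // `stream: true` yields an AsyncIterable<EngineResponse> as typed above.
  const stream = await chatEngine.chat({ message: prompt, stream: true });

  return LlamaIndexAdapter.toDataStreamResponse(stream);
}
```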
@@ -4293,4 +5024,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, CallWarning, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
+ export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, 
zodSchema };