ai 5.0.0-canary.3 → 5.0.0-canary.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,14 +1,47 @@
- import { IDGenerator } from '@ai-sdk/provider-utils';
+ import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
 export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
- export { Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, formatDataStreamPart, jsonSchema, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
- import { LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2ProviderMetadata, LanguageModelV2CallOptions, AISDKError, LanguageModelV2FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2ProviderOptions, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ import { EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, LanguageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2CallOptions, AISDKError, LanguageModelV2FunctionToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, TranscriptionModelV1CallWarning, TranscriptionModelV1, LanguageModelV2ProviderOptions, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Prompt, LanguageModelV2StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { z } from 'zod';
+ import { JSONSchema7 } from 'json-schema';
 import { ServerResponse as ServerResponse$1 } from 'http';

+ /**
+ Embedding model that is used by the AI SDK Core functions.
+ */
+ type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
+ /**
+ Embedding.
+ */
+ type Embedding = EmbeddingModelV1Embedding;
+
+ /**
+ Image model that is used by the AI SDK Core functions.
+ */
+ type ImageModel = ImageModelV1;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type ImageGenerationWarning = ImageModelV1CallWarning;
+
+ type ImageModelResponseMetadata = {
+ /**
+ Timestamp for the start of the generated response.
+ */
+ timestamp: Date;
+ /**
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+
 /**
 Language model that is used by the AI SDK Core functions.
 */
@@ -48,14 +81,836 @@ Tool choice for the generation. It supports the following settings:
 - `none`: the model must not call tools
 - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
 */
- type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
- type: 'tool';
- toolName: keyof TOOLS;
+ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+ type: 'tool';
+ toolName: keyof TOOLS;
+ };
+ /**
+ * @deprecated Use `ToolChoice` instead.
+ */
+ type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
+
+ type LanguageModelRequestMetadata = {
+ /**
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
+ */
+ body?: string;
+ };
+
+ type LanguageModelResponseMetadata = {
+ /**
+ ID for the generated response.
+ */
+ id: string;
+ /**
+ Timestamp for the start of the generated response.
+ */
+ timestamp: Date;
+ /**
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
+ /**
+ Response headers (available only for providers that use HTTP requests).
+ */
+ headers?: Record<string, string>;
+ };
+
+ /**
+ * Provider for language, text embedding, and image models.
+ */
+ type Provider = {
+ /**
+ Returns the language model with the given id.
+ The model id is then passed to the provider function to get the model.
+
+ @param {string} id - The id of the model to return.
+
+ @returns {LanguageModel} The language model associated with the id
+
+ @throws {NoSuchModelError} If no such model exists.
+ */
+ languageModel(modelId: string): LanguageModel;
+ /**
+ Returns the text embedding model with the given id.
+ The model id is then passed to the provider function to get the model.
+
+ @param {string} id - The id of the model to return.
+
+ @returns {LanguageModel} The language model associated with the id
+
+ @throws {NoSuchModelError} If no such model exists.
+ */
+ textEmbeddingModel(modelId: string): EmbeddingModel<string>;
+ /**
+ Returns the image model with the given id.
+ The model id is then passed to the provider function to get the model.
+
+ @param {string} id - The id of the model to return.
+
+ @returns {ImageModel} The image model associated with the id
+ */
+ imageModel(modelId: string): ImageModel;
+ };
+
+ /**
+ Additional provider-specific metadata that is returned from the provider.
+
+ This is needed to enable provider-specific functionality that can be
+ fully encapsulated in the provider.
+ */
+ type ProviderMetadata = LanguageModelV2ProviderMetadata;
+ /**
+ Additional provider-specific options.
+
+ They are passed through to the provider from the AI SDK and enable
+ provider-specific functionality that can be fully encapsulated in the provider.
+ */
+ type ProviderOptions = LanguageModelV2ProviderMetadata;
+
+ /**
+ Represents the number of tokens used in a prompt and completion.
+ */
+ type LanguageModelUsage$1 = {
+ /**
+ The number of tokens used in the prompt.
+ */
+ promptTokens: number;
+ /**
+ The number of tokens used in the completion.
+ */
+ completionTokens: number;
+ /**
+ The total number of tokens used (promptTokens + completionTokens).
+ */
+ totalTokens: number;
+ };
+ /**
+ Represents the number of tokens used in an embedding.
+ */
+ type EmbeddingModelUsage = {
+ /**
+ The number of tokens used in the embedding.
+ */
+ tokens: number;
+ };
+
+ /**
+ Represents the number of tokens used in a prompt and completion.
+ */
+ type LanguageModelUsage = {
+ /**
+ The number of tokens used in the prompt.
+ */
+ promptTokens: number;
+ /**
+ The number of tokens used in the completion.
+ */
+ completionTokens: number;
+ /**
+ The total number of tokens used (promptTokens + completionTokens).
+ */
+ totalTokens: number;
+ };
+
+ type IdGenerator = () => string;
+ /**
+ Tool invocations are either tool calls or tool results. For each assistant tool call,
+ there is one tool invocation. While the call is in progress, the invocation is a tool call.
+ Once the call is complete, the invocation is a tool result.
+
+ The step is used to track how to map an assistant UI message with many tool invocations
+ back to a sequence of LLM assistant/tool result message pairs.
+ It is optional for backwards compatibility.
+ */
+ type ToolInvocation = ({
+ state: 'partial-call';
+ step?: number;
+ } & ToolCall<string, any>) | ({
+ state: 'call';
+ step?: number;
+ } & ToolCall<string, any>) | ({
+ state: 'result';
+ step?: number;
+ } & ToolResult<string, any, any>);
+ /**
+ * An attachment that can be sent along with a message.
+ */
+ interface Attachment {
+ /**
+ * The name of the attachment, usually the file name.
+ */
+ name?: string;
+ /**
+ * A string indicating the [media type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
+ * By default, it's extracted from the pathname's extension.
+ */
+ contentType?: string;
+ /**
+ * The URL of the attachment. It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
+ */
+ url: string;
+ }
+ /**
+ * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
+ */
+ interface Message {
+ /**
+ A unique identifier for the message.
+ */
+ id: string;
+ /**
+ The timestamp of the message.
+ */
+ createdAt?: Date;
+ /**
+ Text content of the message. Use parts when possible.
+ */
+ content: string;
+ /**
+ Reasoning for the message.
+
+ @deprecated Use `parts` instead.
+ */
+ reasoning?: string;
+ /**
+ * Additional attachments to be sent along with the message.
+ */
+ experimental_attachments?: Attachment[];
+ /**
+ The 'data' role is deprecated.
+ */
+ role: 'system' | 'user' | 'assistant' | 'data';
+ /**
+ For data messages.
+
+ @deprecated Data messages will be removed.
+ */
+ data?: JSONValue;
+ /**
+ * Additional message-specific information added on the server via StreamData
+ */
+ annotations?: JSONValue[] | undefined;
+ /**
+ Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
+ that the assistant made as part of this message.
+
+ @deprecated Use `parts` instead.
+ */
+ toolInvocations?: Array<ToolInvocation>;
+ /**
+ * The parts of the message. Use this for rendering the message in the UI.
+ *
+ * Assistant messages can have text, reasoning and tool invocation parts.
+ * User messages can have text parts.
+ */
+ parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
+ }
+ type UIMessage = Message & {
+ /**
+ * The parts of the message. Use this for rendering the message in the UI.
+ *
+ * Assistant messages can have text, reasoning and tool invocation parts.
+ * User messages can have text parts.
+ */
+ parts: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
+ };
+ /**
+ * A text part of a message.
+ */
+ type TextUIPart = {
+ type: 'text';
+ /**
+ * The text content.
+ */
+ text: string;
+ };
+ /**
+ * A reasoning part of a message.
+ */
+ type ReasoningUIPart = {
+ type: 'reasoning';
+ /**
+ * The reasoning text.
+ */
+ reasoning: string;
+ details: Array<{
+ type: 'text';
+ text: string;
+ signature?: string;
+ } | {
+ type: 'redacted';
+ data: string;
+ }>;
+ };
+ /**
+ * A tool invocation part of a message.
+ */
+ type ToolInvocationUIPart = {
+ type: 'tool-invocation';
+ /**
+ * The tool invocation.
+ */
+ toolInvocation: ToolInvocation;
+ };
+ /**
+ * A source part of a message.
+ */
+ type SourceUIPart = {
+ type: 'source';
+ /**
+ * The source.
+ */
+ source: LanguageModelV2Source;
+ };
+ /**
+ * A file part of a message.
+ */
+ type FileUIPart = {
+ type: 'file';
+ /**
+ * IANA media type of the file.
+ *
+ * @see https://www.iana.org/assignments/media-types/media-types.xhtml
+ */
+ mediaType: string;
+ /**
+ * The base64 encoded data.
+ */
+ data: string;
+ };
+ /**
+ * A step boundary part of a message.
+ */
+ type StepStartUIPart = {
+ type: 'step-start';
+ };
+ type CreateMessage = Omit<Message, 'id'> & {
+ id?: Message['id'];
+ };
+ type ChatRequest = {
+ /**
+ An optional object of headers to be passed to the API endpoint.
+ */
+ headers?: Record<string, string> | Headers;
+ /**
+ An optional object to be passed to the API endpoint.
+ */
+ body?: object;
+ /**
+ The messages of the chat.
+ */
+ messages: Message[];
+ /**
+ Additional data to be sent to the server.
+ */
+ data?: JSONValue;
+ };
+ type RequestOptions = {
+ /**
+ An optional object of headers to be passed to the API endpoint.
+ */
+ headers?: Record<string, string> | Headers;
+ /**
+ An optional object to be passed to the API endpoint.
+ */
+ body?: object;
+ };
+ type ChatRequestOptions = {
+ /**
+ Additional headers that should be to be passed to the API endpoint.
+ */
+ headers?: Record<string, string> | Headers;
+ /**
+ Additional body JSON properties that should be sent to the API endpoint.
+ */
+ body?: object;
+ /**
+ Additional data to be sent to the API endpoint.
+ */
+ data?: JSONValue;
+ /**
+ * Additional files to be sent to the server.
+ */
+ experimental_attachments?: FileList | Array<Attachment>;
+ /**
+ * Allow submitting an empty message. Defaults to `false`.
+ */
+ allowEmptySubmit?: boolean;
+ };
+ type UseChatOptions = {
+ /**
+ Keeps the last message when an error happens. Defaults to `true`.
+
+ @deprecated This option will be removed in the next major release.
+ */
+ keepLastMessageOnError?: boolean;
+ /**
+ * The API endpoint that accepts a `{ messages: Message[] }` object and returns
+ * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
+ */
+ api?: string;
+ /**
+ * A unique identifier for the chat. If not provided, a random one will be
+ * generated. When provided, the `useChat` hook with the same `id` will
+ * have shared states across components.
+ */
+ id?: string;
+ /**
+ * Initial messages of the chat. Useful to load an existing chat history.
+ */
+ initialMessages?: Message[];
+ /**
+ * Initial input of the chat.
+ */
+ initialInput?: string;
+ /**
+ Optional callback function that is invoked when a tool call is received.
+ Intended for automatic client-side tool execution.
+
+ You can optionally return a result for the tool call,
+ either synchronously or asynchronously.
+ */
+ onToolCall?: ({ toolCall, }: {
+ toolCall: ToolCall<string, unknown>;
+ }) => void | Promise<unknown> | unknown;
+ /**
+ * Callback function to be called when the API response is received.
+ */
+ onResponse?: (response: Response) => void | Promise<void>;
+ /**
+ * Optional callback function that is called when the assistant message is finished streaming.
+ *
+ * @param message The message that was streamed.
+ * @param options.usage The token usage of the message.
+ * @param options.finishReason The finish reason of the message.
+ */
+ onFinish?: (message: Message, options: {
+ usage: LanguageModelUsage;
+ finishReason: LanguageModelV2FinishReason;
+ }) => void;
+ /**
+ * Callback function to be called when an error is encountered.
+ */
+ onError?: (error: Error) => void;
+ /**
+ * A way to provide a function that is going to be used for ids for messages and the chat.
+ * If not provided the default AI SDK `generateId` is used.
+ */
+ generateId?: IdGenerator;
+ /**
+ * The credentials mode to be used for the fetch request.
+ * Possible values are: 'omit', 'same-origin', 'include'.
+ * Defaults to 'same-origin'.
+ */
+ credentials?: RequestCredentials;
+ /**
+ * HTTP headers to be sent with the API request.
+ */
+ headers?: Record<string, string> | Headers;
+ /**
+ * Extra body object to be sent with the API request.
+ * @example
+ * Send a `sessionId` to the API along with the messages.
+ * ```js
+ * useChat({
+ * body: {
+ * sessionId: '123',
+ * }
+ * })
+ * ```
+ */
+ body?: object;
+ /**
+ * Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
+ * Defaults to `false`. When set to `true`, the API endpoint might need to
+ * handle the extra fields before forwarding the request to the AI service.
+ */
+ sendExtraMessageFields?: boolean;
+ /**
+ Streaming protocol that is used. Defaults to `data`.
+ */
+ streamProtocol?: 'data' | 'text';
+ /**
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
+ or to provide a custom fetch implementation for e.g. testing.
+ */
+ fetch?: FetchFunction;
+ };
+ type UseCompletionOptions = {
+ /**
+ * The API endpoint that accepts a `{ prompt: string }` object and returns
+ * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
+ */
+ api?: string;
+ /**
+ * An unique identifier for the chat. If not provided, a random one will be
+ * generated. When provided, the `useChat` hook with the same `id` will
+ * have shared states across components.
+ */
+ id?: string;
+ /**
+ * Initial prompt input of the completion.
+ */
+ initialInput?: string;
+ /**
+ * Initial completion result. Useful to load an existing history.
+ */
+ initialCompletion?: string;
+ /**
+ * Callback function to be called when the API response is received.
+ */
+ onResponse?: (response: Response) => void | Promise<void>;
+ /**
+ * Callback function to be called when the completion is finished streaming.
+ */
+ onFinish?: (prompt: string, completion: string) => void;
+ /**
+ * Callback function to be called when an error is encountered.
+ */
+ onError?: (error: Error) => void;
+ /**
+ * The credentials mode to be used for the fetch request.
+ * Possible values are: 'omit', 'same-origin', 'include'.
+ * Defaults to 'same-origin'.
+ */
+ credentials?: RequestCredentials;
+ /**
+ * HTTP headers to be sent with the API request.
+ */
+ headers?: Record<string, string> | Headers;
+ /**
+ * Extra body object to be sent with the API request.
+ * @example
+ * Send a `sessionId` to the API along with the prompt.
+ * ```js
+ * useChat({
+ * body: {
+ * sessionId: '123',
+ * }
+ * })
+ * ```
+ */
+ body?: object;
+ /**
+ Streaming protocol that is used. Defaults to `data`.
+ */
+ streamProtocol?: 'data' | 'text';
+ /**
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
+ or to provide a custom fetch implementation for e.g. testing.
+ */
+ fetch?: FetchFunction;
+ };
+ /**
+ A JSON value can be a string, number, boolean, object, array, or null.
+ JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
+ */
+ type JSONValue = null | string | number | boolean | {
+ [value: string]: JSONValue;
+ } | Array<JSONValue>;
+
+ declare const getOriginalFetch$1: () => typeof fetch;
+ declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, restoreMessagesOnFailure, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, }: {
+ api: string;
+ body: Record<string, any>;
+ streamProtocol: 'data' | 'text' | undefined;
+ credentials: RequestCredentials | undefined;
+ headers: HeadersInit | undefined;
+ abortController: (() => AbortController | null) | undefined;
+ restoreMessagesOnFailure: () => void;
+ onResponse: ((response: Response) => void | Promise<void>) | undefined;
+ onUpdate: (options: {
+ message: UIMessage;
+ data: JSONValue[] | undefined;
+ replaceLastMessage: boolean;
+ }) => void;
+ onFinish: UseChatOptions['onFinish'];
+ onToolCall: UseChatOptions['onToolCall'];
+ generateId: IdGenerator;
+ fetch: ReturnType<typeof getOriginalFetch$1> | undefined;
+ lastMessage: UIMessage | undefined;
+ }): Promise<void>;
+
+ declare const getOriginalFetch: () => typeof fetch;
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onResponse, onFinish, onError, onData, fetch, }: {
+ api: string;
+ prompt: string;
+ credentials: RequestCredentials | undefined;
+ headers: HeadersInit | undefined;
+ body: Record<string, any>;
+ streamProtocol: 'data' | 'text' | undefined;
+ setCompletion: (completion: string) => void;
+ setLoading: (loading: boolean) => void;
+ setError: (error: Error | undefined) => void;
+ setAbortController: (abortController: AbortController | null) => void;
+ onResponse: ((response: Response) => void | Promise<void>) | undefined;
+ onFinish: ((prompt: string, completion: string) => void) | undefined;
+ onError: ((error: Error) => void) | undefined;
+ onData: ((data: JSONValue[]) => void) | undefined;
+ fetch: ReturnType<typeof getOriginalFetch> | undefined;
+ }): Promise<string | null | undefined>;
+
+ type DataStreamString = `${(typeof DataStreamStringPrefixes)[keyof typeof DataStreamStringPrefixes]}:${string}\n`;
+ interface DataStreamPart<CODE extends string, NAME extends string, TYPE> {
+ code: CODE;
+ name: NAME;
+ parse: (value: JSONValue) => {
+ type: NAME;
+ value: TYPE;
+ };
+ }
+ declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, DataStreamPart<"2", "data", JSONValue[]>, DataStreamPart<"3", "error", string>, DataStreamPart<"8", "message_annotations", JSONValue[]>, DataStreamPart<"9", "tool_call", ToolCall<string, any>>, DataStreamPart<"a", "tool_result", Omit<ToolResult<string, any, any>, "args" | "toolName">>, DataStreamPart<"b", "tool_call_streaming_start", {
+ toolCallId: string;
+ toolName: string;
+ }>, DataStreamPart<"c", "tool_call_delta", {
+ toolCallId: string;
+ argsTextDelta: string;
+ }>, DataStreamPart<"d", "finish_message", {
+ finishReason: LanguageModelV2FinishReason;
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ }>, DataStreamPart<"e", "finish_step", {
+ isContinued: boolean;
+ finishReason: LanguageModelV2FinishReason;
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ }>, DataStreamPart<"f", "start_step", {
+ messageId: string;
+ }>, DataStreamPart<"g", "reasoning", string>, DataStreamPart<"h", "source", LanguageModelV2Source>, DataStreamPart<"i", "redacted_reasoning", {
+ data: string;
+ }>, DataStreamPart<"j", "reasoning_signature", {
+ signature: string;
+ }>, DataStreamPart<"k", "file", {
+ data: string;
+ mimeType: string;
+ }>];
+ type DataStreamParts = (typeof dataStreamParts)[number];
+ /**
+ * Maps the type of a stream part to its value type.
+ */
+ type DataStreamPartValueType = {
+ [P in DataStreamParts as P['name']]: ReturnType<P['parse']>['value'];
+ };
+ type DataStreamPartType = ReturnType<DataStreamParts['parse']>;
+ /**
+ * The map of prefixes for data in the stream
+ *
+ * - 0: Text from the LLM response
+ * - 1: (OpenAI) function_call responses
+ * - 2: custom JSON added by the user using `Data`
+ * - 6: (OpenAI) tool_call responses
+ *
+ * Example:
+ * ```
+ * 0:Vercel
+ * 0:'s
+ * 0: AI
+ * 0: AI
+ * 0: SDK
+ * 0: is great
+ * 0:!
+ * 2: { "someJson": "value" }
+ * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
+ * 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
+ *```
+ */
+ declare const DataStreamStringPrefixes: { [K in DataStreamParts["name"]]: (typeof dataStreamParts)[number]["code"]; };
+ /**
+ Parses a stream part from a string.
+
+ @param line The string to parse.
+ @returns The parsed stream part.
+ @throws An error if the string cannot be parsed.
+ */
+ declare const parseDataStreamPart: (line: string) => DataStreamPartType;
+ /**
+ Prepends a string with a prefix from the `StreamChunkPrefixes`, JSON-ifies it,
+ and appends a new line.
+
+ It ensures type-safety for the part type and value.
+ */
+ declare function formatDataStreamPart<T extends keyof DataStreamPartValueType>(type: T, value: DataStreamPartValueType[T]): DataStreamString;
+
+ /**
+ * Converts a data URL of type text/* to a text string.
+ */
+ declare function getTextFromDataUrl(dataUrl: string): string;
+
+ /**
+ Create a type from an object with all keys and nested keys set to optional.
+ The helper supports normal objects and Zod schemas (which are resolved automatically).
+ It always recurses into arrays.
+
+ Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
+ */
+ type DeepPartial<T> = T extends z.ZodTypeAny ? DeepPartialInternal<z.infer<T>> : DeepPartialInternal<T>;
+ type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
+ type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
+ type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
+ type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
+ type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
+ type PartialObject<ObjectType extends object> = {
+ [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
+ };
+
+ declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;
+
+ declare function fillMessageParts(messages: Message[]): UIMessage[];
+
+ declare function getMessageParts(message: Message | CreateMessage | UIMessage): (TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart)[];
+
+ /**
+ * Performs a deep-equal comparison of two parsed JSON objects.
+ *
+ * @param {any} obj1 - The first object to compare.
+ * @param {any} obj2 - The second object to compare.
+ * @returns {boolean} - Returns true if the two objects are deeply equal, false otherwise.
+ */
+ declare function isDeepEqualData(obj1: any, obj2: any): boolean;
+
+ declare function parsePartialJson(jsonText: string | undefined): {
+ value: JSONValue$1 | undefined;
+ state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
+ };
+
+ declare function prepareAttachmentsForRequest(attachmentsFromOptions: FileList | Array<Attachment> | undefined): Promise<Attachment[]>;
+
+ declare function processDataStream({ stream, onTextPart, onReasoningPart, onReasoningSignaturePart, onRedactedReasoningPart, onSourcePart, onFilePart, onDataPart, onErrorPart, onToolCallStreamingStartPart, onToolCallDeltaPart, onToolCallPart, onToolResultPart, onMessageAnnotationsPart, onFinishMessagePart, onFinishStepPart, onStartStepPart, }: {
+ stream: ReadableStream<Uint8Array>;
+ onTextPart?: (streamPart: (DataStreamPartType & {
+ type: 'text';
+ })['value']) => Promise<void> | void;
+ onReasoningPart?: (streamPart: (DataStreamPartType & {
+ type: 'reasoning';
+ })['value']) => Promise<void> | void;
+ onReasoningSignaturePart?: (streamPart: (DataStreamPartType & {
+ type: 'reasoning_signature';
+ })['value']) => Promise<void> | void;
+ onRedactedReasoningPart?: (streamPart: (DataStreamPartType & {
+ type: 'redacted_reasoning';
+ })['value']) => Promise<void> | void;
+ onFilePart?: (streamPart: (DataStreamPartType & {
+ type: 'file';
+ })['value']) => Promise<void> | void;
+ onSourcePart?: (streamPart: (DataStreamPartType & {
+ type: 'source';
+ })['value']) => Promise<void> | void;
+ onDataPart?: (streamPart: (DataStreamPartType & {
+ type: 'data';
+ })['value']) => Promise<void> | void;
+ onErrorPart?: (streamPart: (DataStreamPartType & {
+ type: 'error';
+ })['value']) => Promise<void> | void;
+ onToolCallStreamingStartPart?: (streamPart: (DataStreamPartType & {
+ type: 'tool_call_streaming_start';
+ })['value']) => Promise<void> | void;
+ onToolCallDeltaPart?: (streamPart: (DataStreamPartType & {
+ type: 'tool_call_delta';
+ })['value']) => Promise<void> | void;
+ onToolCallPart?: (streamPart: (DataStreamPartType & {
+ type: 'tool_call';
+ })['value']) => Promise<void> | void;
+ onToolResultPart?: (streamPart: (DataStreamPartType & {
+ type: 'tool_result';
+ })['value']) => Promise<void> | void;
+ onMessageAnnotationsPart?: (streamPart: (DataStreamPartType & {
+ type: 'message_annotations';
+ })['value']) => Promise<void> | void;
+ onFinishMessagePart?: (streamPart: (DataStreamPartType & {
+ type: 'finish_message';
+ })['value']) => Promise<void> | void;
+ onFinishStepPart?: (streamPart: (DataStreamPartType & {
+ type: 'finish_step';
+ })['value']) => Promise<void> | void;
+ onStartStepPart?: (streamPart: (DataStreamPartType & {
+ type: 'start_step';
+ })['value']) => Promise<void> | void;
+ }): Promise<void>;
+
+ declare function processTextStream({ stream, onTextPart, }: {
+ stream: ReadableStream<Uint8Array>;
+ onTextPart: (chunk: string) => Promise<void> | void;
+ }): Promise<void>;
+
+ /**
+ * Used to mark schemas so we can support both Zod and custom schemas.
+ */
+ declare const schemaSymbol: unique symbol;
+ type Schema<OBJECT = unknown> = Validator<OBJECT> & {
+ /**
+ * Used to mark schemas so we can support both Zod and custom schemas.
+ */
+ [schemaSymbol]: true;
+ /**
+ * Schema type for inference.
+ */
+ _type: OBJECT;
+ /**
+ * The JSON Schema for the schema. It is passed to the providers.
+ */
+ readonly jsonSchema: JSONSchema7;
+ };
+ /**
+ * Create a schema using a JSON Schema.
+ *
+ * @param jsonSchema The JSON Schema for the schema.
+ * @param options.validate Optional. A validation function for the schema.
+ */
+ declare function jsonSchema<OBJECT = unknown>(jsonSchema: JSONSchema7, { validate, }?: {
+ validate?: (value: unknown) => {
+ success: true;
+ value: OBJECT;
+ } | {
+ success: false;
+ error: Error;
+ };
+ }): Schema<OBJECT>;
+ declare function asSchema<OBJECT>(schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>): Schema<OBJECT>;
+
+ declare function shouldResubmitMessages({ originalMaxToolInvocationStep, originalMessageCount, maxSteps, messages, }: {
+ originalMaxToolInvocationStep: number | undefined;
+ originalMessageCount: number;
+ maxSteps: number;
+ messages: UIMessage[];
+ }): boolean;
+ /**
+ Check if the message is an assistant message with completed tool calls.
+ The last step of the message must have at least one tool invocation and
+ all tool invocations must have a result.
+ */
+ declare function isAssistantMessageWithCompletedToolCalls(message: UIMessage): message is UIMessage & {
+ role: 'assistant';
 };
+
 /**
- * @deprecated Use `ToolChoice` instead.
+ * Updates the result of a specific tool invocation in the last message of the given messages array.
+ *
+ * @param {object} params - The parameters object.
+ * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
+ * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
+ * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
+ * @returns {void} This function does not return anything.
 */
- type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
+ declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
+ messages: UIMessage[];
+ toolCallId: string;
+ toolResult: unknown;
+ }): void;
+
+ declare function zodSchema<OBJECT>(zodSchema: z.Schema<OBJECT, z.ZodTypeDef, any>, options?: {
+ /**
+ * Enables support for references in the schema.
+ * This is required for recursive schemas, e.g. with `z.lazy`.
+ * However, not all language models and providers support such references.
+ * Defaults to `false`.
+ */
+ useReferences?: boolean;
+ }): Schema<OBJECT>;

 interface DataStreamWriter {
 /**
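Note on the data stream helpers that moved into this file: `formatDataStreamPart` and `parseDataStreamPart` implement the `<prefix>:<JSON>` line protocol listed in the `DataStreamStringPrefixes` comment above. A minimal round-trip sketch, assuming the consumer splits the stream on newlines before parsing:

```ts
import { formatDataStreamPart, parseDataStreamPart } from 'ai';

// Encode: the part name maps to its prefix code ('text' -> '0'),
// the value is JSON-stringified, and a newline is appended.
const line = formatDataStreamPart('text', 'Hello');
// line === '0:"Hello"\n'

// Decode a single line (newline already stripped by the surrounding splitter).
const part = parseDataStreamPart('0:"Hello"');
// part => { type: 'text', value: 'Hello' }
```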
@@ -65,11 +920,11 @@ interface DataStreamWriter {
 /**
 * Appends a data part to the stream.
 */
- writeData(value: JSONValue): void;
+ writeData(value: JSONValue$1): void;
 /**
 * Appends a message annotation to the stream.
 */
- writeMessageAnnotation(value: JSONValue): void;
+ writeMessageAnnotation(value: JSONValue$1): void;
 /**
 * Appends a source part to the stream.
 */
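`writeData` and `writeMessageAnnotation` now accept the provider-level `JSONValue` (rendered here as `JSONValue$1`) instead of the former `@ai-sdk/ui-utils` alias; usage is unchanged. A small sketch against the interface, assuming the writer comes from a data stream helper such as `createDataStreamResponse`:

```ts
import type { DataStreamWriter } from 'ai';

// Any JSON-serializable value is accepted; per the part list above, data parts
// travel on the "2:" prefix and message annotations on the "8:" prefix.
function reportProgress(writer: DataStreamWriter, step: number) {
  writer.writeData({ status: 'processing', step });
  writer.writeMessageAnnotation({ loggedAt: new Date().toISOString() });
}
```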
@@ -137,145 +992,6 @@ type TelemetrySettings = {
 tracer?: Tracer;
 };

- /**
- Embedding model that is used by the AI SDK Core functions.
- */
- type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
- /**
- Embedding.
- */
- type Embedding = EmbeddingModelV1Embedding;
-
- /**
- Image model that is used by the AI SDK Core functions.
- */
- type ImageModel = ImageModelV1;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type ImageGenerationWarning = ImageModelV1CallWarning;
-
- type ImageModelResponseMetadata = {
- /**
- Timestamp for the start of the generated response.
- */
- timestamp: Date;
- /**
- The ID of the response model that was used to generate the response.
- */
- modelId: string;
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
-
- type LanguageModelRequestMetadata = {
- /**
- Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
- */
- body?: string;
- };
-
- type LanguageModelResponseMetadata = {
- /**
- ID for the generated response.
- */
- id: string;
- /**
- Timestamp for the start of the generated response.
- */
- timestamp: Date;
- /**
- The ID of the response model that was used to generate the response.
- */
- modelId: string;
- /**
- Response headers (available only for providers that use HTTP requests).
- */
- headers?: Record<string, string>;
- };
-
- /**
- * Provider for language, text embedding, and image models.
- */
- type Provider = {
- /**
- Returns the language model with the given id.
- The model id is then passed to the provider function to get the model.
-
- @param {string} id - The id of the model to return.
-
- @returns {LanguageModel} The language model associated with the id
-
- @throws {NoSuchModelError} If no such model exists.
- */
- languageModel(modelId: string): LanguageModel;
- /**
- Returns the text embedding model with the given id.
- The model id is then passed to the provider function to get the model.
-
- @param {string} id - The id of the model to return.
-
- @returns {LanguageModel} The language model associated with the id
-
- @throws {NoSuchModelError} If no such model exists.
- */
- textEmbeddingModel(modelId: string): EmbeddingModel<string>;
- /**
- Returns the image model with the given id.
- The model id is then passed to the provider function to get the model.
-
- @param {string} id - The id of the model to return.
-
- @returns {ImageModel} The image model associated with the id
- */
- imageModel(modelId: string): ImageModel;
- };
-
- /**
- Additional provider-specific metadata that is returned from the provider.
-
- This is needed to enable provider-specific functionality that can be
- fully encapsulated in the provider.
- */
- type ProviderMetadata = LanguageModelV2ProviderMetadata;
- /**
- Additional provider-specific options.
-
- They are passed through to the provider from the AI SDK and enable
- provider-specific functionality that can be fully encapsulated in the provider.
- */
- type ProviderOptions = LanguageModelV2ProviderMetadata;
-
- /**
- Represents the number of tokens used in a prompt and completion.
- */
- type LanguageModelUsage = {
- /**
- The number of tokens used in the prompt.
- */
- promptTokens: number;
- /**
- The number of tokens used in the completion.
- */
- completionTokens: number;
- /**
- The total number of tokens used (promptTokens + completionTokens).
- */
- totalTokens: number;
- };
- /**
- Represents the number of tokens used in an embedding.
- */
- type EmbeddingModelUsage = {
- /**
- The number of tokens used in the embedding.
- */
- tokens: number;
- };
-
 /**
 The result of an `embed` call.
 It contains the embedding, the value, and additional information.
@@ -496,6 +1212,10 @@ type ToolResultContent = Array<{
 } | {
 type: 'image';
 data: string;
+ mediaType?: string;
+ /**
+ * @deprecated Use `mediaType` instead.
+ */
 mimeType?: string;
 }>;

@@ -532,7 +1252,13 @@ interface ImagePart {
 */
 image: DataContent | URL;
 /**
- Optional mime type of the image.
+ Optional IANA media type of the image.
+
+ @see https://www.iana.org/assignments/media-types/media-types.xhtml
+ */
+ mediaType?: string;
+ /**
+ @deprecated Use `mediaType` instead.
 */
 mimeType?: string;
 /**
@@ -563,9 +1289,15 @@ interface FilePart {
 */
 filename?: string;
 /**
- Mime type of the file.
+ IANA media type of the file.
+
+ @see https://www.iana.org/assignments/media-types/media-types.xhtml
 */
- mimeType: string;
+ mediaType: string;
+ /**
+ @deprecated Use `mediaType` instead.
+ */
+ mimeType?: string;
 /**
 Additional provider-specific metadata. They are passed through
 to the provider from the AI SDK and enable provider-specific
@@ -814,9 +1546,11 @@ interface GeneratedFile {
 */
 readonly uint8Array: Uint8Array;
 /**
- MIME type of the file
+ The IANA media type of the file.
+
+ @see https://www.iana.org/assignments/media-types/media-types.xhtml
 */
- readonly mimeType: string;
+ readonly mediaType: string;
 }

 type ReasoningDetail = {
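The `mimeType` → `mediaType` renames in `ToolResultContent`, `ImagePart`, `FilePart`, and `GeneratedFile` above follow one pattern: `mediaType` is the new IANA media type field, while `mimeType` stays behind as a deprecated optional (except on `GeneratedFile`, where it is replaced outright). A hedged migration sketch; `myModel` and `pdfBytes` are placeholders, not part of this diff, and the file part's `data` field is assumed from the wider `FilePart` interface:

```ts
import { generateText } from 'ai';

async function summarizePdf(myModel: any, pdfBytes: Uint8Array) {
  const { text } = await generateText({
    model: myModel,
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Summarize this document.' },
          // was: { type: 'file', data: pdfBytes, mimeType: 'application/pdf' }
          { type: 'file', data: pdfBytes, mediaType: 'application/pdf' },
        ],
      },
    ],
  });
  return text;
}
```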
@@ -2086,7 +2820,7 @@ type StepResult<TOOLS extends ToolSet> = {
 /**
 The token usage of the generated text.
 */
- readonly usage: LanguageModelUsage;
+ readonly usage: LanguageModelUsage$1;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
@@ -2183,7 +2917,7 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 /**
 The token usage of the generated text.
 */
- readonly usage: LanguageModelUsage;
+ readonly usage: LanguageModelUsage$1;
 /**
 Warnings from the model provider (e.g. unsupported settings)
 */
@@ -2253,7 +2987,7 @@ interface Output<OUTPUT, PARTIAL> {
 text: string;
 }, context: {
 response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 finishReason: FinishReason;
 }): OUTPUT;
 }
@@ -2356,7 +3090,7 @@ type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
 tools: TOOLS;
 parameterSchema: (options: {
 toolName: string;
- }) => JSONSchema7;
+ }) => JSONSchema7$1;
 error: NoSuchToolError | InvalidToolArgumentsError;
 }) => Promise<LanguageModelV2FunctionToolCall | null>;

@@ -2497,8 +3231,8 @@ declare class StreamData {
 private warningTimeout;
 constructor();
 close(): Promise<void>;
- append(value: JSONValue$1): void;
- appendMessageAnnotation(value: JSONValue$1): void;
+ append(value: JSONValue): void;
+ appendMessageAnnotation(value: JSONValue): void;
 }

 type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
@@ -2556,7 +3290,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {

 Resolved when the response is finished.
 */
- readonly usage: Promise<LanguageModelUsage>;
+ readonly usage: Promise<LanguageModelUsage$1>;
 /**
 Sources that have been used as input to generate the response.
 For multi-step generation, the sources are accumulated from all steps.
@@ -2781,7 +3515,7 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 request: LanguageModelRequestMetadata;
 warnings: CallWarning[] | undefined;
 response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 finishReason: FinishReason;
 providerMetadata: ProviderMetadata | undefined;
 /**
@@ -2792,7 +3526,7 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 providerMetadata: ProviderMetadata | undefined;
 /**
 * @deprecated Use `providerMetadata` instead.
@@ -3117,7 +3851,7 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
 }
 ```
 */
- providerOptions?: Record<string, Record<string, JSONValue>>;
+ providerOptions?: Record<string, Record<string, JSONValue$1>>;
 /**
 Maximum number of retries per embedding model call. Set to 0 to disable retries.

@@ -3150,7 +3884,7 @@ interface GenerateObjectResult<OBJECT> {
 /**
 The token usage of the generated text.
 */
- readonly usage: LanguageModelUsage;
+ readonly usage: LanguageModelUsage$1;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
@@ -3447,7 +4181,7 @@ functionality that can be fully encapsulated in the provider.
 generateId?: () => string;
 currentDate?: () => Date;
 };
- }): Promise<GenerateObjectResult<JSONValue>>;
+ }): Promise<GenerateObjectResult<JSONValue$1>>;

 /**
 The result of a `streamObject` call that contains the partial object stream and additional information.
@@ -3460,7 +4194,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 /**
 The token usage of the generated response. Resolved when the response is finished.
 */
- readonly usage: Promise<LanguageModelUsage>;
+ readonly usage: Promise<LanguageModelUsage$1>;
 /**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
@@ -3536,7 +4270,7 @@ type ObjectStreamPart<PARTIAL> = {
 type: 'finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 response: LanguageModelResponseMetadata;
 providerMetadata?: ProviderMetadata;
 };
@@ -3558,7 +4292,7 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
 /**
 The token usage of the generated response.
 */
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 /**
 The generated object. Can be undefined if the final object does not match the schema.
 */
@@ -3781,7 +4515,7 @@ The stream processing will pause until the callback promise is resolved.
 /**
 Callback that is called when the LLM response and the final object validation are finished.
 */
- onFinish?: StreamObjectOnFinishCallback<JSONValue>;
+ onFinish?: StreamObjectOnFinishCallback<JSONValue$1>;
 /**
 * Internal. For test use only. May change without notice.
 */
@@ -3790,7 +4524,133 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
- }): StreamObjectResult<JSONValue, JSONValue, never>;
+ }): StreamObjectResult<JSONValue$1, JSONValue$1, never>;
+
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type TranscriptionWarning = TranscriptionModelV1CallWarning;
+
+ type TranscriptionModelResponseMetadata = {
+ /**
+ Timestamp for the start of the generated response.
+ */
+ timestamp: Date;
+ /**
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+
+ /**
+ The result of a `transcribe` call.
+ It contains the transcript and additional information.
+ */
+ interface TranscriptionResult {
+ /**
+ * The complete transcribed text from the audio.
+ */
+ readonly text: string;
+ /**
+ * Array of transcript segments with timing information.
+ * Each segment represents a portion of the transcribed text with start and end times.
+ */
+ readonly segments: Array<{
+ /**
+ * The text content of this segment.
+ */
+ readonly text: string;
+ /**
+ * The start time of this segment in seconds.
+ */
+ readonly startSecond: number;
+ /**
+ * The end time of this segment in seconds.
+ */
+ readonly endSecond: number;
+ }>;
+ /**
+ * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
+ * May be undefined if the language couldn't be detected.
+ */
+ readonly language: string | undefined;
+ /**
+ * The total duration of the audio file in seconds.
+ * May be undefined if the duration couldn't be determined.
+ */
+ readonly durationInSeconds: number | undefined;
+ /**
+ Warnings for the call, e.g. unsupported settings.
+ */
+ readonly warnings: Array<TranscriptionWarning>;
+ /**
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
+ */
+ readonly responses: Array<TranscriptionModelResponseMetadata>;
+ /**
+ Provider metadata from the provider.
+ */
+ readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
+ }
+
+ /**
+ Generates transcripts using a transcription model.
+
+ @param model - The transcription model to use.
+ @param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
+ as body parameters.
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+ @returns A result object that contains the generated transcript.
+ */
+ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+ /**
+ The transcription model to use.
+ */
+ model: TranscriptionModelV1;
+ /**
+ The audio data to transcribe.
+ */
+ audio: DataContent | URL;
+ /**
+ Additional provider-specific options that are passed through to the provider
+ as body parameters.
+
+ The outer record is keyed by the provider name, and the inner
+ record is keyed by the provider-specific metadata key.
+ ```ts
+ {
+ "openai": {
+ "temperature": 0
+ }
+ }
+ ```
+ */
+ providerOptions?: ProviderOptions;
+ /**
+ Maximum number of retries per transcript model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ /**
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ }): Promise<TranscriptionResult>;

 /**
 * Applies default settings for a language model.
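`transcribe` is the headline addition of this release (exported as `experimental_transcribe`; see the export list at the end of the diff). A sketch against the declaration above; the model instance is left as a placeholder, since provider-side transcription factories are outside this package:

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

async function transcribeMeeting(model: any /* placeholder: any TranscriptionModelV1 */) {
  const result = await transcribe({
    model,
    audio: await readFile('meeting.mp3'), // DataContent (Buffer) or URL
    maxRetries: 2, // the default per the declaration above
  });

  console.log(result.language, result.durationInSeconds);
  for (const segment of result.segments) {
    console.log(`[${segment.startSecond}s-${segment.endSecond}s] ${segment.text}`);
  }
  return result.text;
}
```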
@@ -4003,7 +4863,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 type: 'finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 experimental_providerMetadata?: ProviderMetadata;
 } | {
 type: 'error';
@@ -4067,7 +4927,7 @@ declare class NoObjectGeneratedError extends AISDKError {
 /**
 The usage of the model.
 */
- readonly usage: LanguageModelUsage | undefined;
+ readonly usage: LanguageModelUsage$1 | undefined;
 /**
 Reason why the model finished generating a response.
 */
@@ -4077,7 +4937,7 @@ declare class NoObjectGeneratedError extends AISDKError {
 cause?: Error;
 text?: string;
 response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage;
+ usage: LanguageModelUsage$1;
 finishReason: FinishReason;
 });
 static isInstance(error: unknown): error is NoObjectGeneratedError;
@@ -4111,11 +4971,11 @@ declare const symbol$6: unique symbol;
 declare class ToolExecutionError extends AISDKError {
 private readonly [symbol$6];
 readonly toolName: string;
- readonly toolArgs: JSONValue;
+ readonly toolArgs: JSONValue$1;
 readonly toolCallId: string;
 constructor({ toolArgs, toolName, toolCallId, cause, message, }: {
 message?: string;
- toolArgs: JSONValue;
+ toolArgs: JSONValue$1;
 toolName: string;
 toolCallId: string;
 cause: unknown;
@@ -4298,4 +5158,4 @@ declare namespace llamaindexAdapter {
 };
 }

- export { AssistantContent, CallWarning, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
+ export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, transcribe as experimental_transcribe, experimental_wrapLanguageModel, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
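The reworked export list summarizes the release: the UI-side types and helpers that 5.0.0-canary.3 re-exported from `@ai-sdk/ui-utils` (`Message`, `UIMessage`, `processDataStream`, `parsePartialJson`, `zodSchema`, and friends) are now declared in and exported from `ai` itself, and `experimental_transcribe` joins the experimental surface. Existing imports from `ai` keep working; a small sketch of the now-inlined utilities:

```ts
import { parsePartialJson, isAssistantMessageWithCompletedToolCalls, type UIMessage } from 'ai';

// parsePartialJson repairs incomplete JSON from a streaming response.
const { value, state } = parsePartialJson('{"city": "Berlin", "temp":');
// state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse'

declare const message: UIMessage;
if (isAssistantMessageWithCompletedToolCalls(message)) {
  // narrowed to UIMessage & { role: 'assistant' }
}
```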