ai 5.0.0-canary.14 → 5.0.0-canary.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,11 +1,11 @@
- import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
- export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
- export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
+ import { ToolCall, ToolResult, FetchFunction, Schema, IDGenerator } from '@ai-sdk/provider-utils';
+ export { IDGenerator, Schema, ToolCall, ToolResult, asSchema, createIdGenerator, generateId, jsonSchema } from '@ai-sdk/provider-utils';
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
+ import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { z } from 'zod';
- import { JSONSchema7 } from 'json-schema';
  import { ServerResponse as ServerResponse$1 } from 'http';

  /**
@@ -42,6 +42,8 @@ type ImageModelResponseMetadata = {
  headers?: Record<string, string>;
  };

+ type JSONValue = JSONValue$1;
+
  /**
  Language model that is used by the AI SDK Core functions.
  */
@@ -159,36 +161,63 @@ provider-specific functionality that can be fully encapsulated in the provider.
  type ProviderOptions = SharedV2ProviderOptions;

  /**
- Represents the number of tokens used in a prompt and completion.
- */
- type LanguageModelUsage$1 = {
+ Speech model that is used by the AI SDK Core functions.
+ */
+ type SpeechModel = SpeechModelV1;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type SpeechWarning = SpeechModelV1CallWarning;
+
+ type SpeechModelResponseMetadata = {
  /**
- The number of tokens used in the prompt.
+ Timestamp for the start of the generated response.
  */
- promptTokens: number;
+ timestamp: Date;
  /**
- The number of tokens used in the completion.
- */
- completionTokens: number;
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
  /**
- The total number of tokens used (promptTokens + completionTokens).
+ Response headers.
  */
- totalTokens: number;
+ headers?: Record<string, string>;
+ /**
+ Response body.
+ */
+ body?: unknown;
  };
+
  /**
- Represents the number of tokens used in an embedding.
- */
- type EmbeddingModelUsage = {
+ Transcription model that is used by the AI SDK Core functions.
+ */
+ type TranscriptionModel = TranscriptionModelV1;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type TranscriptionWarning = TranscriptionModelV1CallWarning;
+
+ type TranscriptionModelResponseMetadata = {
  /**
- The number of tokens used in the embedding.
+ Timestamp for the start of the generated response.
  */
- tokens: number;
+ timestamp: Date;
+ /**
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
  };

  /**
  Represents the number of tokens used in a prompt and completion.
  */
- type LanguageModelUsage = {
+ type LanguageModelUsage$1 = {
  /**
  The number of tokens used in the prompt.
  */
@@ -244,7 +273,7 @@ interface Attachment {
  /**
  * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
  */
- interface Message {
+ interface UIMessage {
  /**
  A unique identifier for the message.
  */
@@ -258,53 +287,26 @@ interface Message {
  */
  content: string;
  /**
- Reasoning for the message.
-
- @deprecated Use `parts` instead.
- */
- reasoning?: string;
- /**
- * Additional attachments to be sent along with the message.
+ Additional attachments to be sent along with the message.
  */
  experimental_attachments?: Attachment[];
  /**
- The 'data' role is deprecated.
+ The role of the message.
  */
- role: 'system' | 'user' | 'assistant' | 'data';
+ role: 'system' | 'user' | 'assistant';
  /**
- For data messages.
-
- @deprecated Data messages will be removed.
+ Additional message-specific information added on the server via StreamData
  */
- data?: JSONValue;
+ annotations?: JSONValue$1[] | undefined;
  /**
- * Additional message-specific information added on the server via StreamData
- */
- annotations?: JSONValue[] | undefined;
- /**
- Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
- that the assistant made as part of this message.
+ The parts of the message. Use this for rendering the message in the UI.

- @deprecated Use `parts` instead.
- */
- toolInvocations?: Array<ToolInvocation>;
- /**
- * The parts of the message. Use this for rendering the message in the UI.
- *
- * Assistant messages can have text, reasoning and tool invocation parts.
- * User messages can have text parts.
+ Assistant messages can have text, reasoning and tool invocation parts.
+ User messages can have text parts.
  */
- parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
+ parts: Array<UIMessagePart>;
  }
- type UIMessage = Message & {
- /**
- * The parts of the message. Use this for rendering the message in the UI.
- *
- * Assistant messages can have text, reasoning and tool invocation parts.
- * User messages can have text parts.
- */
- parts: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
- };
+ type UIMessagePart = TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart;
  /**
  * A text part of a message.
  */
@@ -323,7 +325,7 @@ type ReasoningUIPart = {
  /**
  * The reasoning text.
  */
- reasoning: string;
+ text: string;
  /**
  * The provider metadata.
  */
@@ -371,8 +373,8 @@ type FileUIPart = {
  type StepStartUIPart = {
  type: 'step-start';
  };
- type CreateMessage = Omit<Message, 'id'> & {
- id?: Message['id'];
+ type CreateUIMessage = Omit<UIMessage, 'id'> & {
+ id?: UIMessage['id'];
  };
  type ChatRequest = {
  /**
@@ -386,11 +388,11 @@ type ChatRequest = {
  /**
  The messages of the chat.
  */
- messages: Message[];
+ messages: UIMessage[];
  /**
  Additional data to be sent to the server.
  */
- data?: JSONValue;
+ data?: JSONValue$1;
  };
  type RequestOptions = {
  /**
@@ -414,7 +416,7 @@ type ChatRequestOptions = {
  /**
  Additional data to be sent to the API endpoint.
  */
- data?: JSONValue;
+ data?: JSONValue$1;
  /**
  * Additional files to be sent to the server.
  */
@@ -445,7 +447,7 @@ type UseChatOptions = {
  /**
  * Initial messages of the chat. Useful to load an existing chat history.
  */
- initialMessages?: Message[];
+ initialMessages?: UIMessage[];
  /**
  * Initial input of the chat.
  */
@@ -471,8 +473,8 @@ type UseChatOptions = {
  * @param options.usage The token usage of the message.
  * @param options.finishReason The finish reason of the message.
  */
- onFinish?: (message: Message, options: {
- usage: LanguageModelUsage;
+ onFinish?: (message: UIMessage, options: {
+ usage: LanguageModelUsage$1;
  finishReason: LanguageModelV2FinishReason;
  }) => void;
  /**
@@ -588,66 +590,32 @@ type UseCompletionOptions = {
  */
  fetch?: FetchFunction;
  };
- /**
- A JSON value can be a string, number, boolean, object, array, or null.
- JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
- */
- type JSONValue = null | string | number | boolean | {
- [value: string]: JSONValue;
- } | Array<JSONValue>;

  /**
- Transcription model that is used by the AI SDK Core functions.
- */
- type TranscriptionModel = TranscriptionModelV1;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type TranscriptionWarning = TranscriptionModelV1CallWarning;
-
- type TranscriptionModelResponseMetadata = {
+ Represents the number of tokens used in a prompt and completion.
+ */
+ type LanguageModelUsage = {
  /**
- Timestamp for the start of the generated response.
+ The number of tokens used in the prompt.
  */
- timestamp: Date;
+ promptTokens: number;
  /**
- The ID of the response model that was used to generate the response.
- */
- modelId: string;
+ The number of tokens used in the completion.
+ */
+ completionTokens: number;
  /**
- Response headers.
+ The total number of tokens used (promptTokens + completionTokens).
  */
- headers?: Record<string, string>;
+ totalTokens: number;
  };
-
  /**
- Speech model that is used by the AI SDK Core functions.
- */
- type SpeechModel = SpeechModelV1;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type SpeechWarning = SpeechModelV1CallWarning;
-
- type SpeechModelResponseMetadata = {
- /**
- Timestamp for the start of the generated response.
- */
- timestamp: Date;
- /**
- The ID of the response model that was used to generate the response.
- */
- modelId: string;
- /**
- Response headers.
- */
- headers?: Record<string, string>;
+ Represents the number of tokens used in an embedding.
+ */
+ type EmbeddingModelUsage = {
  /**
- Response body.
+ The number of tokens used in the embedding.
  */
- body?: unknown;
+ tokens: number;
  };

  declare const getOriginalFetch$1: () => typeof fetch;
@@ -662,7 +630,7 @@ declare function callChatApi({ api, body, streamProtocol, credentials, headers,
  onResponse: ((response: Response) => void | Promise<void>) | undefined;
  onUpdate: (options: {
  message: UIMessage;
- data: JSONValue[] | undefined;
+ data: JSONValue$1[] | undefined;
  replaceLastMessage: boolean;
  }) => void;
  onFinish: UseChatOptions['onFinish'];
@@ -700,7 +668,7 @@ interface DataStreamPart<CODE extends string, NAME extends string, TYPE> {
  value: TYPE;
  };
  }
- declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, DataStreamPart<"2", "data", JSONValue[]>, DataStreamPart<"3", "error", string>, DataStreamPart<"8", "message_annotations", JSONValue[]>, DataStreamPart<"9", "tool_call", ToolCall<string, any>>, DataStreamPart<"a", "tool_result", Omit<ToolResult<string, any, any>, "args" | "toolName">>, DataStreamPart<"b", "tool_call_streaming_start", {
+ declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, DataStreamPart<"2", "data", _ai_sdk_provider.JSONValue[]>, DataStreamPart<"3", "error", string>, DataStreamPart<"8", "message_annotations", _ai_sdk_provider.JSONValue[]>, DataStreamPart<"9", "tool_call", ToolCall<string, any>>, DataStreamPart<"a", "tool_result", Omit<ToolResult<string, any, any>, "args" | "toolName">>, DataStreamPart<"b", "tool_call_streaming_start", {
  toolCallId: string;
  toolName: string;
  }>, DataStreamPart<"c", "tool_call_delta", {
@@ -799,10 +767,6 @@ type PartialObject<ObjectType extends object> = {

  declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;

- declare function fillMessageParts(messages: Message[]): UIMessage[];
-
- declare function getMessageParts(message: Message | CreateMessage | UIMessage): (TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart)[];
-
  /**
  * Performs a deep-equal comparison of two parsed JSON objects.
  *
@@ -874,39 +838,19 @@ declare function processTextStream({ stream, onTextPart, }: {
  }): Promise<void>;

  /**
- * Used to mark schemas so we can support both Zod and custom schemas.
- */
- declare const schemaSymbol: unique symbol;
- type Schema<OBJECT = unknown> = Validator<OBJECT> & {
- /**
- * Used to mark schemas so we can support both Zod and custom schemas.
- */
- [schemaSymbol]: true;
- /**
- * Schema type for inference.
- */
- _type: OBJECT;
- /**
- * The JSON Schema for the schema. It is passed to the providers.
- */
- readonly jsonSchema: JSONSchema7;
- };
- /**
- * Create a schema using a JSON Schema.
+ * Updates the result of a specific tool invocation in the last message of the given messages array.
  *
- * @param jsonSchema The JSON Schema for the schema.
- * @param options.validate Optional. A validation function for the schema.
+ * @param {object} params - The parameters object.
+ * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
+ * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
+ * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
+ * @returns {void} This function does not return anything.
  */
- declare function jsonSchema<OBJECT = unknown>(jsonSchema: JSONSchema7, { validate, }?: {
- validate?: (value: unknown) => {
- success: true;
- value: OBJECT;
- } | {
- success: false;
- error: Error;
- };
- }): Schema<OBJECT>;
- declare function asSchema<OBJECT>(schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT> | undefined): Schema<OBJECT>;
+ declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
+ messages: UIMessage[];
+ toolCallId: string;
+ toolResult: unknown;
+ }): void;

  declare function shouldResubmitMessages({ originalMaxToolInvocationStep, originalMessageCount, maxSteps, messages, }: {
  originalMaxToolInvocationStep: number | undefined;
@@ -923,31 +867,6 @@ declare function isAssistantMessageWithCompletedToolCalls(message: UIMessage): m
  role: 'assistant';
  };

- /**
- * Updates the result of a specific tool invocation in the last message of the given messages array.
- *
- * @param {object} params - The parameters object.
- * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
- * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
- * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
- * @returns {void} This function does not return anything.
- */
- declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
- messages: UIMessage[];
- toolCallId: string;
- toolResult: unknown;
- }): void;
-
- declare function zodSchema<OBJECT>(zodSchema: z.Schema<OBJECT, z.ZodTypeDef, any>, options?: {
- /**
- * Enables support for references in the schema.
- * This is required for recursive schemas, e.g. with `z.lazy`.
- * However, not all language models and providers support such references.
- * Defaults to `false`.
- */
- useReferences?: boolean;
- }): Schema<OBJECT>;
-
  interface DataStreamWriter {
  /**
  * Appends a data part to the stream.
@@ -1532,7 +1451,7 @@ type Prompt = {
  /**
  A list of messages. You can either use `prompt` or `messages` but not both.
  */
- messages?: Array<CoreMessage> | Array<Omit<Message, 'id'>>;
+ messages?: Array<CoreMessage> | Array<Omit<UIMessage, 'id'>>;
  };

  /**
@@ -1628,20 +1547,20 @@ declare const JSONRPCErrorSchema: z.ZodObject<{
  data?: unknown;
  }>;
  }, "strict", z.ZodTypeAny, {
+ id: string | number;
  error: {
  code: number;
  message: string;
  data?: unknown;
  };
- id: string | number;
  jsonrpc: "2.0";
  }, {
+ id: string | number;
  error: {
  code: number;
  message: string;
  data?: unknown;
  };
- id: string | number;
  jsonrpc: "2.0";
  }>;
  type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
@@ -1762,20 +1681,20 @@ declare const JSONRPCMessageSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendS
  data?: unknown;
  }>;
  }, "strict", z.ZodTypeAny, {
+ id: string | number;
  error: {
  code: number;
  message: string;
  data?: unknown;
  };
- id: string | number;
  jsonrpc: "2.0";
  }, {
+ id: string | number;
  error: {
  code: number;
  message: string;
  data?: unknown;
  };
- id: string | number;
  jsonrpc: "2.0";
  }>]>;
  type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
@@ -2643,9 +2562,13 @@ declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
  * Tool parameters are automatically inferred from the server's JSON schema
  * if not explicitly provided in the tools configuration
  *
+ * This client is meant to be used to communicate with a single server. To communicate and fetch tools across multiple servers, it's recommended to create a new client instance per server.
+ *
  * Not supported:
  * - Client options (e.g. sampling, roots) as they are not needed for tool conversion
  * - Accepting notifications
+ * - Session management (when passing a sessionId to an instance of the Streamable HTTP transport)
+ * - Resumable SSE streams
  */
  declare class MCPClient {
  private transport;
@@ -2658,6 +2581,7 @@ declare class MCPClient {
  constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
  init(): Promise<this>;
  close(): Promise<void>;
+ private assertCapability;
  private request;
  private listTools;
  private callTool;
@@ -2792,7 +2716,7 @@ type StepResult<TOOLS extends ToolSet> = {
  /**
  The token usage of the generated text.
  */
- readonly usage: LanguageModelUsage$1;
+ readonly usage: LanguageModelUsage;
  /**
  Warnings from the model provider (e.g. unsupported settings).
  */
@@ -2880,7 +2804,7 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
  /**
  The token usage of the generated text.
  */
- readonly usage: LanguageModelUsage$1;
+ readonly usage: LanguageModelUsage;
  /**
  Warnings from the model provider (e.g. unsupported settings)
  */
@@ -2933,7 +2857,7 @@ interface Output$1<OUTPUT, PARTIAL> {
  text: string;
  }, context: {
  response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  finishReason: FinishReason;
  }): Promise<OUTPUT>;
  }
@@ -2985,9 +2909,9 @@ declare class NoSuchToolError extends AISDKError {
  * Otherwise, the new message will be appended.
  */
  declare function appendClientMessage({ messages, message, }: {
- messages: Message[];
- message: Message;
- }): Message[];
+ messages: UIMessage[];
+ message: UIMessage;
+ }): UIMessage[];

  /**
  * Appends the ResponseMessage[] from the response to a Message[] (for useChat).
@@ -2997,7 +2921,7 @@ declare function appendClientMessage({ messages, message, }: {
  * @returns A new Message[] with the response messages appended.
  */
  declare function appendResponseMessages({ messages, responseMessages, _internal: { currentDate }, }: {
- messages: Message[];
+ messages: UIMessage[];
  responseMessages: ResponseMessage[];
  /**
  Internal. For test use only. May change without notice.
@@ -3005,13 +2929,13 @@ declare function appendResponseMessages({ messages, responseMessages, _internal:
  _internal?: {
  currentDate?: () => Date;
  };
- }): Message[];
+ }): UIMessage[];

  /**
  Converts an array of messages from useChat into an array of CoreMessages that can be used
  with the AI core functions (e.g. `streamText`).
  */
- declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<Message, 'id'>>, options?: {
+ declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<UIMessage, 'id'>>, options?: {
  tools?: TOOLS;
  }): CoreMessage[];

@@ -3035,7 +2959,7 @@ type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
  tools: TOOLS;
  parameterSchema: (options: {
  toolName: string;
- }) => JSONSchema7$1;
+ }) => JSONSchema7;
  error: NoSuchToolError | InvalidToolArgumentsError;
  }) => Promise<LanguageModelV2ToolCall | null>;

@@ -3253,7 +3177,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {

  Resolved when the response is finished.
  */
- readonly usage: Promise<LanguageModelUsage$1>;
+ readonly usage: Promise<LanguageModelUsage>;
  /**
  Sources that have been used as input to generate the response.
  For multi-step generation, the sources are accumulated from all steps.
@@ -3470,14 +3394,14 @@ type TextStreamPart<TOOLS extends ToolSet> = {
  request: LanguageModelRequestMetadata;
  warnings: CallWarning[] | undefined;
  response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  finishReason: FinishReason;
  providerMetadata: ProviderMetadata | undefined;
  isContinued: boolean;
  } | {
  type: 'finish';
  finishReason: FinishReason;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  providerMetadata: ProviderMetadata | undefined;
  /**
  * @deprecated use response on step-finish instead
@@ -3790,7 +3714,7 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
  }
  ```
  */
- providerOptions?: Record<string, Record<string, JSONValue$1>>;
+ providerOptions?: ProviderOptions;
  /**
  Maximum number of retries per embedding model call. Set to 0 to disable retries.

@@ -3823,7 +3747,7 @@ interface GenerateObjectResult<OBJECT> {
  /**
  The token usage of the generated text.
  */
- readonly usage: LanguageModelUsage$1;
+ readonly usage: LanguageModelUsage;
  /**
  Warnings from the model provider (e.g. unsupported settings).
  */
@@ -3951,7 +3875,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
  /**
  The token usage of the generated response. Resolved when the response is finished.
  */
- readonly usage: Promise<LanguageModelUsage$1>;
+ readonly usage: Promise<LanguageModelUsage>;
  /**
  Additional provider-specific metadata. They are passed through
  from the provider to the AI SDK and enable provider-specific
@@ -4022,7 +3946,7 @@ type ObjectStreamPart<PARTIAL> = {
  } | {
  type: 'finish';
  finishReason: FinishReason;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  response: LanguageModelResponseMetadata;
  providerMetadata?: ProviderMetadata;
  };
@@ -4044,7 +3968,7 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
  /**
  The token usage of the generated response.
  */
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  /**
  The generated object. Can be undefined if the final object does not match the schema.
  */
@@ -4074,7 +3998,14 @@ This function streams the output. If you do not want to stream the output, use `
  @return
  A result object for accessing the partial object stream and additional information.
  */
- declare function streamObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'no-schema' = 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'no-schema' ? {} : {
+ declare function streamObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = RESULT extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
+ /**
+ The enum values that the model should use.
+ */
+ enum: Array<RESULT>;
+ mode?: 'json';
+ output: 'enum';
+ } : Output extends 'no-schema' ? {} : {
  /**
  The schema of the object that the model should generate.
  */
@@ -4139,7 +4070,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): StreamObjectResult<Output extends 'array' ? RESULT : DeepPartial<RESULT>, Output extends 'array' ? RESULT : RESULT, Output extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
+ }): StreamObjectResult<Output extends 'enum' ? string : Output extends 'array' ? RESULT : DeepPartial<RESULT>, Output extends 'array' ? RESULT : RESULT, Output extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;

  /**
  * A generated audio file.
@@ -4473,6 +4404,8 @@ declare function createProviderRegistry<PROVIDERS extends Record<string, Provide
  */
  declare const experimental_createProviderRegistry: typeof createProviderRegistry;

+ declare function getToolInvocations(message: UIMessage): ToolInvocation[];
+
  /**
  * Calculates the cosine similarity between two vectors. This is a useful metric for
  * comparing the similarity of two vectors such as embeddings.
@@ -4556,7 +4489,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
  } | {
  type: 'finish';
  finishReason: FinishReason;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  providerMetadata?: ProviderMetadata;
  } | {
  type: 'error';
@@ -4620,7 +4553,7 @@ declare class NoObjectGeneratedError extends AISDKError {
  /**
  The usage of the model.
  */
- readonly usage: LanguageModelUsage$1 | undefined;
+ readonly usage: LanguageModelUsage | undefined;
  /**
  Reason why the model finished generating a response.
  */
@@ -4630,7 +4563,7 @@ declare class NoObjectGeneratedError extends AISDKError {
  cause?: Error;
  text?: string;
  response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  finishReason: FinishReason;
  });
  static isInstance(error: unknown): error is NoObjectGeneratedError;
@@ -4716,9 +4649,9 @@ declare class InvalidMessageRoleError extends AISDKError {
  declare const symbol$2: unique symbol;
  declare class MessageConversionError extends AISDKError {
  private readonly [symbol$2];
- readonly originalMessage: Omit<Message, 'id'>;
+ readonly originalMessage: Omit<UIMessage, 'id'>;
  constructor({ originalMessage, message, }: {
- originalMessage: Omit<Message, 'id'>;
+ originalMessage: Omit<UIMessage, 'id'>;
  message: string;
  });
  static isInstance(error: unknown): error is MessageConversionError;
@@ -4755,4 +4688,4 @@ declare class RetryError extends AISDKError {
  static isInstance(error: unknown): error is RetryError;
  }

- export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
+ export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UIMessagePart, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, formatDataStreamPart, generateObject, generateText, getTextFromDataUrl, getToolInvocations, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel };
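The most visible shifts in these declarations are the removal of the `Message`/`CreateMessage` aliases in favor of `UIMessage`/`CreateUIMessage` (with a required `parts` array and no `'data'` role), the new `output: 'enum'` mode on `streamObject`, the added `getToolInvocations` helper, and the re-export of `Schema`, `jsonSchema`, and `asSchema` from `@ai-sdk/provider-utils` instead of local definitions. The sketch below is an illustration against these declarations only, not code from the package; the `@ai-sdk/openai` provider import and the model id are assumptions.

```ts
// Illustrative sketch against the 5.0.0-canary.16 declarations shown above.
// The @ai-sdk/openai import and the model id are assumptions, not part of this diff.
import { convertToCoreMessages, streamObject, type UIMessage } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// UIMessage replaces Message: `parts` is now required and the 'data' role is gone.
const history: Array<Omit<UIMessage, 'id'>> = [
  {
    role: 'user',
    content: 'Was this release note helpful?',
    parts: [{ type: 'text', text: 'Was this release note helpful?' }],
  },
];

export async function classifyFeedback() {
  const result = streamObject({
    model: openai('gpt-4o-mini'), // assumed model id
    output: 'enum',               // enum output mode is newly declared for streamObject here
    enum: ['helpful', 'unhelpful', 'unclear'],
    messages: convertToCoreMessages(history),
  });

  // With output: 'enum', partial results stream as plain strings.
  for await (const partial of result.partialObjectStream) {
    console.log(partial);
  }

  return result.object; // resolves to one of the enum values
}
```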