ai 5.0.0-canary.16 → 5.0.0-canary.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/README.md +4 -4
- package/dist/index.d.mts +141 -159
- package/dist/index.d.ts +141 -159
- package/dist/index.js +341 -377
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +336 -378
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -89
- package/dist/internal/index.d.ts +30 -89
- package/dist/internal/index.js +204 -344
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +197 -336
- package/dist/internal/index.mjs.map +1 -1
- package/dist/mcp-stdio/index.d.mts +2 -2
- package/dist/mcp-stdio/index.d.ts +2 -2
- package/package.json +4 -4
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
-import { ToolCall, ToolResult, FetchFunction, Schema,
-export {
+import { ToolCall, ToolResult, FetchFunction, ToolResultContent, Schema, IdGenerator as IdGenerator$1 } from '@ai-sdk/provider-utils';
+export { IdGenerator, Schema, ToolCall, ToolResult, asSchema, createIdGenerator, generateId, jsonSchema } from '@ai-sdk/provider-utils';
 import * as _ai_sdk_provider from '@ai-sdk/provider';
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, LanguageModelV2Usage, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -26,6 +26,10 @@ Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
 type ImageGenerationWarning = ImageModelV2CallWarning;
+/**
+Metadata from the model provider for this call
+*/
+type ImageModelProviderMetadata = ImageModelV2ProviderMetadata;
 
 type ImageModelResponseMetadata = {
 /**
@@ -217,19 +221,15 @@ type TranscriptionModelResponseMetadata = {
 /**
 Represents the number of tokens used in a prompt and completion.
 */
-type LanguageModelUsage = {
-/**
-The number of tokens used in the prompt.
-*/
-promptTokens: number;
-/**
-The number of tokens used in the completion.
-*/
-completionTokens: number;
+type LanguageModelUsage = LanguageModelV2Usage;
+/**
+Represents the number of tokens used in an embedding.
+*/
+type EmbeddingModelUsage = {
 /**
-The total number of tokens used (promptTokens + completionTokens).
+The number of tokens used in the embedding.
 */
-totalTokens: number;
+tokens: number;
 };
 
 type IdGenerator = () => string;
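The usage types change shape here: `LanguageModelUsage` becomes an alias of `LanguageModelV2Usage` from `@ai-sdk/provider`, while `EmbeddingModelUsage` keeps the inline `{ tokens }` shape. A minimal consumer-side sketch (the logging helper is illustrative, not part of the package):

```ts
import type { EmbeddingModelUsage, LanguageModelUsage } from 'ai';

// EmbeddingModelUsage keeps the simple shape declared above.
const embeddingUsage: EmbeddingModelUsage = { tokens: 128 };

// LanguageModelUsage is now whatever LanguageModelV2Usage defines in
// @ai-sdk/provider; the old promptTokens/completionTokens object is gone,
// so treat its fields as provider-level types.
function logUsage(usage: LanguageModelUsage): void {
  console.log('usage', usage);
}
```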
@@ -252,24 +252,6 @@ type ToolInvocation = ({
 state: 'result';
 step?: number;
 } & ToolResult<string, any, any>);
-/**
- * An attachment that can be sent along with a message.
- */
-interface Attachment {
-/**
- * The name of the attachment, usually the file name.
- */
-name?: string;
-/**
- * A string indicating the [media type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
- * By default, it's extracted from the pathname's extension.
- */
-contentType?: string;
-/**
- * The URL of the attachment. It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
- */
-url: string;
-}
 /**
 * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
 */
@@ -287,10 +269,6 @@ interface UIMessage {
 */
 content: string;
 /**
-Additional attachments to be sent along with the message.
-*/
-experimental_attachments?: Attachment[];
-/**
 The role of the message.
 */
 role: 'system' | 'user' | 'assistant';
@@ -301,8 +279,12 @@ interface UIMessage {
 /**
 The parts of the message. Use this for rendering the message in the UI.
 
-
-
+System messages should be avoided (set the system prompt on the server instead).
+They can have text parts.
+
+User messages can have text parts and file parts.
+
+Assistant messages can have text, reasoning, tool invocation, and file parts.
 */
 parts: Array<UIMessagePart>;
 }
@@ -363,9 +345,14 @@ type FileUIPart = {
 */
 mediaType: string;
 /**
-*
+* Optional filename of the file.
+*/
+filename?: string;
+/**
+* The URL of the file.
+* It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
 */
-
+url: string;
 };
 /**
 * A step boundary part of a message.
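With `Attachment` and `experimental_attachments` removed, files now travel as `FileUIPart`s inside `UIMessage.parts`. A minimal sketch of the reworked part, assuming the `type: 'file'` discriminator that sits above the fields shown in this hunk:

```ts
import type { FileUIPart } from 'ai';

// Assumed discriminator: `type: 'file'` is declared just above the hunk shown here.
const attachmentPart: FileUIPart = {
  type: 'file',
  mediaType: 'application/pdf',
  filename: 'report.pdf', // new optional field in this release
  url: 'https://example.com/report.pdf', // hosted URL or Data URL
};
```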
@@ -417,10 +404,6 @@ type ChatRequestOptions = {
 Additional data to be sent to the API endpoint.
 */
 data?: JSONValue$1;
-/**
- * Additional files to be sent to the server.
- */
-experimental_attachments?: FileList | Array<Attachment>;
 /**
 * Allow submitting an empty message. Defaults to `false`.
 */
@@ -428,12 +411,6 @@ type ChatRequestOptions = {
 };
 type UseChatOptions = {
 /**
-Keeps the last message when an error happens. Defaults to `true`.
-
-@deprecated This option will be removed in the next major release.
-*/
-keepLastMessageOnError?: boolean;
-/**
 * The API endpoint that accepts a `{ messages: Message[] }` object and returns
 * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
 */
@@ -474,7 +451,7 @@ type UseChatOptions = {
 * @param options.finishReason The finish reason of the message.
 */
 onFinish?: (message: UIMessage, options: {
-usage: LanguageModelUsage
+usage: LanguageModelUsage;
 finishReason: LanguageModelV2FinishReason;
 }) => void;
 /**
@@ -510,12 +487,6 @@ type UseChatOptions = {
 */
 body?: object;
 /**
-* Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
-* Defaults to `false`. When set to `true`, the API endpoint might need to
-* handle the extra fields before forwarding the request to the AI service.
-*/
-sendExtraMessageFields?: boolean;
-/**
 Streaming protocol that is used. Defaults to `data`.
 */
 streamProtocol?: 'data' | 'text';
@@ -524,6 +495,15 @@ type UseChatOptions = {
 or to provide a custom fetch implementation for e.g. testing.
 */
 fetch?: FetchFunction;
+/**
+Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
+Must be at least 1.
+
+A maximum number is required to prevent infinite loops in the case of misconfigured tools.
+
+By default, it's set to 1, which means that only a single LLM call is made.
+*/
+maxSteps?: number;
 };
 type UseCompletionOptions = {
 /**
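A sketch of the new `maxSteps` option on `UseChatOptions`; the object would be handed to a chat hook such as `useChat` in the framework bindings, which is not part of this file. Only fields shown in this diff are used:

```ts
import type { UseChatOptions } from 'ai';

const chatOptions: UseChatOptions = {
  streamProtocol: 'data',
  // Cap sequential LLM calls so misconfigured tools cannot loop forever.
  maxSteps: 5,
  onFinish: (message, { usage, finishReason }) => {
    console.log(message.id, finishReason, usage);
  },
};
```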
@@ -591,42 +571,14 @@ type UseCompletionOptions = {
 fetch?: FetchFunction;
 };
 
-/**
-Represents the number of tokens used in a prompt and completion.
-*/
-type LanguageModelUsage = {
-/**
-The number of tokens used in the prompt.
-*/
-promptTokens: number;
-/**
-The number of tokens used in the completion.
-*/
-completionTokens: number;
-/**
-The total number of tokens used (promptTokens + completionTokens).
-*/
-totalTokens: number;
-};
-/**
-Represents the number of tokens used in an embedding.
-*/
-type EmbeddingModelUsage = {
-/**
-The number of tokens used in the embedding.
-*/
-tokens: number;
-};
-
 declare const getOriginalFetch$1: () => typeof fetch;
-declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController,
+declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, getCurrentDate, requestType, }: {
 api: string;
 body: Record<string, any>;
 streamProtocol: 'data' | 'text' | undefined;
 credentials: RequestCredentials | undefined;
 headers: HeadersInit | undefined;
 abortController: (() => AbortController | null) | undefined;
-restoreMessagesOnFailure: () => void;
 onResponse: ((response: Response) => void | Promise<void>) | undefined;
 onUpdate: (options: {
 message: UIMessage;
@@ -638,6 +590,8 @@ declare function callChatApi({ api, body, streamProtocol, credentials, headers,
 generateId: IdGenerator;
 fetch: ReturnType<typeof getOriginalFetch$1> | undefined;
 lastMessage: UIMessage | undefined;
+getCurrentDate: () => Date;
+requestType?: 'generate' | 'resume';
 }): Promise<void>;
 
 declare const getOriginalFetch: () => typeof fetch;
@@ -659,6 +613,8 @@ declare function callCompletionApi({ api, prompt, credentials, headers, body, st
 fetch: ReturnType<typeof getOriginalFetch> | undefined;
 }): Promise<string | null | undefined>;
 
+declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
+
 type DataStreamString = `${(typeof DataStreamStringPrefixes)[keyof typeof DataStreamStringPrefixes]}:${string}\n`;
 interface DataStreamPart<CODE extends string, NAME extends string, TYPE> {
 code: CODE;
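The new `convertFileListToFileUIParts` helper takes over from the removed attachment path (`prepareAttachmentsForRequest` disappears further down). A minimal browser-side sketch using only the signature shown above:

```ts
import { convertFileListToFileUIParts } from 'ai';

// Convert a FileList (e.g. from an <input type="file"> element) into
// FileUIPart[] that can be appended to the outgoing UIMessage's `parts`.
async function toFileParts(files: FileList | undefined) {
  const parts = await convertFileListToFileUIParts(files);
  return parts; // Array<FileUIPart>
}
```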
@@ -676,25 +632,19 @@ declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, Da
 argsTextDelta: string;
 }>, DataStreamPart<"d", "finish_message", {
 finishReason: LanguageModelV2FinishReason;
-usage?: {
-promptTokens: number;
-completionTokens: number;
-};
+usage?: LanguageModelV2Usage;
 }>, DataStreamPart<"e", "finish_step", {
 isContinued: boolean;
 finishReason: LanguageModelV2FinishReason;
-usage?: {
-promptTokens: number;
-completionTokens: number;
-};
+usage?: LanguageModelV2Usage;
 }>, DataStreamPart<"f", "start_step", {
 messageId: string;
 }>, DataStreamPart<"g", "reasoning", {
 text: string;
 providerMetadata?: Record<string, any> | undefined;
 }>, DataStreamPart<"h", "source", LanguageModelV2Source>, DataStreamPart<"l", "reasoning_part_finish", {}>, DataStreamPart<"k", "file", {
-
-
+url: string;
+mediaType: string;
 }>];
 type DataStreamParts = (typeof dataStreamParts)[number];
 /**
@@ -736,7 +686,7 @@ Parses a stream part from a string.
 */
 declare const parseDataStreamPart: (line: string) => DataStreamPartType;
 /**
-Prepends a string with a prefix from the `StreamChunkPrefixes`,
+Prepends a string with a prefix from the `StreamChunkPrefixes`, converts it to JSON,
 and appends a new line.
 
 It ensures type-safety for the part type and value.
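The updated doc comment spells out that `formatDataStreamPart` JSON-encodes the value before appending the newline. A small round-trip sketch; the exact wire prefix for `'text'` parts is inferred from the `dataStreamParts` table above (code `"0"`), so treat the commented output as illustrative:

```ts
import { formatDataStreamPart, parseDataStreamPart } from 'ai';

// Encodes a typed part: prefix code + ':' + JSON value + '\n'.
const line = formatDataStreamPart('text', 'Hello'); // e.g. '0:"Hello"\n'

// parseDataStreamPart works on a single line of the data stream.
const part = parseDataStreamPart(line.trimEnd());
console.log(part.type, part.value);
```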
@@ -781,8 +731,6 @@ declare function parsePartialJson(jsonText: string | undefined): Promise<{
 state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
 }>;
 
-declare function prepareAttachmentsForRequest(attachmentsFromOptions: FileList | Array<Attachment> | undefined): Promise<Attachment[]>;
-
 declare function processDataStream({ stream, onTextPart, onReasoningPart, onReasoningPartFinish, onSourcePart, onFilePart, onDataPart, onErrorPart, onToolCallStreamingStartPart, onToolCallDeltaPart, onToolCallPart, onToolResultPart, onMessageAnnotationsPart, onFinishMessagePart, onFinishStepPart, onStartStepPart, }: {
 stream: ReadableStream<Uint8Array>;
 onTextPart?: (streamPart: (DataStreamPartType & {
@@ -837,21 +785,6 @@ declare function processTextStream({ stream, onTextPart, }: {
 onTextPart: (chunk: string) => Promise<void> | void;
 }): Promise<void>;
 
-/**
- * Updates the result of a specific tool invocation in the last message of the given messages array.
- *
- * @param {object} params - The parameters object.
- * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
- * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
- * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
- * @returns {void} This function does not return anything.
- */
-declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
-messages: UIMessage[];
-toolCallId: string;
-toolResult: unknown;
-}): void;
-
 declare function shouldResubmitMessages({ originalMaxToolInvocationStep, originalMessageCount, maxSteps, messages, }: {
 originalMaxToolInvocationStep: number | undefined;
 originalMessageCount: number;
@@ -867,6 +800,21 @@ declare function isAssistantMessageWithCompletedToolCalls(message: UIMessage): m
 role: 'assistant';
 };
 
+/**
+ * Updates the result of a specific tool invocation in the last message of the given messages array.
+ *
+ * @param {object} params - The parameters object.
+ * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
+ * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
+ * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
+ * @returns {void} This function does not return anything.
+ */
+declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
+messages: UIMessage[];
+toolCallId: string;
+toolResult: unknown;
+}): void;
+
 interface DataStreamWriter {
 /**
 * Appends a data part to the stream.
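`updateToolCallResult` only moves below `isAssistantMessageWithCompletedToolCalls`; its signature is unchanged. A usage sketch for client-side tool handling:

```ts
import { updateToolCallResult, type UIMessage } from 'ai';

// Attach a client-side tool result to the matching tool invocation in the
// last message of the array (mutates in place, returns void).
function recordToolResult(
  messages: UIMessage[],
  toolCallId: string,
  result: unknown,
): void {
  updateToolCallResult({ messages, toolCallId, toolResult: result });
}
```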
@@ -1075,7 +1023,7 @@ has a limit on how many embeddings can be generated in a single call.
 
 @returns A result object that contains the embeddings, the value, and additional information.
 */
-declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
+declare function embedMany<VALUE>({ model, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
 /**
 The embedding model to use.
 */
@@ -1109,6 +1057,12 @@ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, ab
 functionality that can be fully encapsulated in the provider.
 */
 providerOptions?: ProviderOptions;
+/**
+ * Maximum number of concurrent requests.
+ *
+ * @default Infinity
+ */
+maxParallelCalls?: number;
 }): Promise<EmbedManyResult<VALUE>>;
 
 type CallSettings = {
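A sketch of the new `maxParallelCalls` option on `embedMany`; `model` stands in for any embedding model instance from a provider package and is not part of this diff:

```ts
import { embedMany, type EmbeddingModel } from 'ai';

async function embedChunks(model: EmbeddingModel<string>, chunks: string[]) {
  const { embeddings } = await embedMany({
    model,
    values: chunks,
    maxParallelCalls: 2, // bound request concurrency; defaults to Infinity
  });
  return embeddings;
}
```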
@@ -1191,19 +1145,6 @@ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffe
 */
 type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
 
-type ToolResultContent = Array<{
-type: 'text';
-text: string;
-} | {
-type: 'image';
-data: string;
-mediaType?: string;
-/**
- * @deprecated Use `mediaType` instead.
- */
-mimeType?: string;
-}>;
-
 /**
 Text content part of a prompt. It contains a string of text.
 */
@@ -1239,10 +1180,6 @@ interface ImagePart {
 */
 mediaType?: string;
 /**
-@deprecated Use `mediaType` instead.
-*/
-mimeType?: string;
-/**
 Additional provider-specific metadata. They are passed through
 to the provider from the AI SDK and enable provider-specific
 functionality that can be fully encapsulated in the provider.
@@ -1272,10 +1209,6 @@ interface FilePart {
 */
 mediaType: string;
 /**
-@deprecated Use `mediaType` instead.
-*/
-mimeType?: string;
-/**
 Additional provider-specific metadata. They are passed through
 to the provider from the AI SDK and enable provider-specific
 functionality that can be fully encapsulated in the provider.
@@ -1362,7 +1295,7 @@ interface ToolResultPart {
 to increase the resilience against prompt injection attacks,
 and because not all providers support several system messages.
 */
-type CoreSystemMessage = {
+type SystemModelMessage = {
 role: 'system';
 content: string;
 /**
@@ -1372,11 +1305,19 @@ type CoreSystemMessage = {
 */
 providerOptions?: ProviderOptions;
 };
-
+/**
+@deprecated Use `SystemModelMessage` instead.
+*/
+type CoreSystemMessage = SystemModelMessage;
+declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
+/**
+@deprecated Use `systemModelMessageSchema` instead.
+*/
+declare const coreSystemMessageSchema: z.ZodType<SystemModelMessage, z.ZodTypeDef, SystemModelMessage>;
 /**
 A user message. It can contain text or a combination of text and images.
 */
-type CoreUserMessage = {
+type UserModelMessage = {
 role: 'user';
 content: UserContent;
 /**
@@ -1386,7 +1327,15 @@ type CoreUserMessage = {
 */
 providerOptions?: ProviderOptions;
 };
-
+/**
+@deprecated Use `UserModelMessage` instead.
+*/
+type CoreUserMessage = UserModelMessage;
+declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
+/**
+@deprecated Use `userModelMessageSchema` instead.
+*/
+declare const coreUserMessageSchema: z.ZodType<UserModelMessage, z.ZodTypeDef, UserModelMessage>;
 /**
 Content of a user message. It can be a string or an array of text and image parts.
 */
@@ -1394,7 +1343,7 @@ type UserContent = string | Array<TextPart | ImagePart | FilePart>;
 /**
 An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
 */
-type CoreAssistantMessage = {
+type AssistantModelMessage = {
 role: 'assistant';
 content: AssistantContent;
 /**
@@ -1404,7 +1353,15 @@ type CoreAssistantMessage = {
 */
 providerOptions?: ProviderOptions;
 };
-
+/**
+@deprecated Use `AssistantModelMessage` instead.
+*/
+type CoreAssistantMessage = AssistantModelMessage;
+declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
+/**
+@deprecated Use `assistantModelMessageSchema` instead.
+*/
+declare const coreAssistantMessageSchema: z.ZodType<AssistantModelMessage, z.ZodTypeDef, AssistantModelMessage>;
 /**
 Content of an assistant message.
 It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
@@ -1413,7 +1370,7 @@ type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart | Too
 /**
 A tool message. It contains the result of one or more tool calls.
 */
-type CoreToolMessage = {
+type ToolModelMessage = {
 role: 'tool';
 content: ToolContent;
 /**
@@ -1423,7 +1380,15 @@ type CoreToolMessage = {
 */
 providerOptions?: ProviderOptions;
 };
-
+/**
+@deprecated Use `ToolModelMessage` instead.
+*/
+type CoreToolMessage = ToolModelMessage;
+declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
+/**
+@deprecated Use `toolModelMessageSchema` instead.
+*/
+declare const coreToolMessageSchema: z.ZodType<ToolModelMessage, z.ZodTypeDef, ToolModelMessage>;
 /**
 Content of a tool message. It is an array of tool result parts.
 */
@@ -1432,7 +1397,15 @@ type ToolContent = Array<ToolResultPart>;
 A message that can be used in the `messages` field of a prompt.
 It can be a user message, an assistant message, or a tool message.
 */
-type
+type ModelMessage = SystemModelMessage | UserModelMessage | AssistantModelMessage | ToolModelMessage;
+/**
+@deprecated Use `ModelMessage` instead.
+*/
+type CoreMessage = ModelMessage;
+declare const modelMessageSchema: z.ZodType<ModelMessage>;
+/**
+@deprecated Use `modelMessageSchema` instead.
+*/
 declare const coreMessageSchema: z.ZodType<CoreMessage>;
 
 /**
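The `Core*` message types survive only as deprecated aliases; new code should use the `*ModelMessage` names and the `ModelMessage` union, with the matching schema exports for runtime validation. A minimal sketch:

```ts
import { modelMessageSchema, type ModelMessage } from 'ai';

// `ModelMessage` replaces `CoreMessage`; the Core* names remain as aliases.
const conversation: ModelMessage[] = [
  { role: 'system', content: 'You are a terse assistant.' },
  { role: 'user', content: 'What changed in this release?' },
  { role: 'assistant', content: 'Message types were renamed.' },
];

// Runtime validation via the renamed schema (coreMessageSchema still exists).
const checked = conversation.map(message => modelMessageSchema.parse(message));
console.log(checked.length);
```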
@@ -1451,7 +1424,7 @@ type Prompt = {
 /**
 A list of messages. You can either use `prompt` or `messages` but not both.
 */
-messages?: Array<
+messages?: Array<ModelMessage> | Array<Omit<UIMessage, 'id'>>;
 };
 
 /**
@@ -1547,20 +1520,20 @@ declare const JSONRPCErrorSchema: z.ZodObject<{
 data?: unknown;
 }>;
 }, "strict", z.ZodTypeAny, {
-id: string | number;
 error: {
 code: number;
 message: string;
 data?: unknown;
 };
+id: string | number;
 jsonrpc: "2.0";
 }, {
-id: string | number;
 error: {
 code: number;
 message: string;
 data?: unknown;
 };
+id: string | number;
 jsonrpc: "2.0";
 }>;
 type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
@@ -1681,20 +1654,20 @@ declare const JSONRPCMessageSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendS
 data?: unknown;
 }>;
 }, "strict", z.ZodTypeAny, {
-id: string | number;
 error: {
 code: number;
 message: string;
 data?: unknown;
 };
+id: string | number;
 jsonrpc: "2.0";
 }, {
-id: string | number;
 error: {
 code: number;
 message: string;
 data?: unknown;
 };
+id: string | number;
 jsonrpc: "2.0";
 }>]>;
 type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
@@ -1752,7 +1725,7 @@ interface ToolExecutionOptions {
 * Messages that were sent to the language model to initiate the response that contained the tool call.
 * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
 */
-messages:
+messages: ModelMessage[];
 /**
 * An optional abort signal that indicates that the overall operation should be aborted.
 */
@@ -2935,9 +2908,13 @@ declare function appendResponseMessages({ messages, responseMessages, _internal:
 Converts an array of messages from useChat into an array of CoreMessages that can be used
 with the AI core functions (e.g. `streamText`).
 */
-declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages:
+declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<UIMessage, 'id'>>, options?: {
 tools?: TOOLS;
-}):
+}): ModelMessage[];
+/**
+@deprecated Use `convertToModelMessages` instead.
+*/
+declare const convertToCoreMessages: typeof convertToModelMessages;
 
 /**
 * A function that attempts to repair a tool call that failed to parse.
|
|
2954
2931
|
*/
|
2955
2932
|
type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
|
2956
2933
|
system: string | undefined;
|
2957
|
-
messages:
|
2934
|
+
messages: ModelMessage[];
|
2958
2935
|
toolCall: LanguageModelV2ToolCall;
|
2959
2936
|
tools: TOOLS;
|
2960
2937
|
parameterSchema: (options: {
|
@@ -3040,7 +3017,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
|
|
3040
3017
|
/**
|
3041
3018
|
Generate a unique ID for each message.
|
3042
3019
|
*/
|
3043
|
-
experimental_generateMessageId?:
|
3020
|
+
experimental_generateMessageId?: IdGenerator$1;
|
3044
3021
|
/**
|
3045
3022
|
When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
|
3046
3023
|
|
@@ -3100,7 +3077,7 @@ A function that attempts to repair a tool call that failed to parse.
|
|
3100
3077
|
* Internal. For test use only. May change without notice.
|
3101
3078
|
*/
|
3102
3079
|
_internal?: {
|
3103
|
-
generateId?:
|
3080
|
+
generateId?: IdGenerator$1;
|
3104
3081
|
currentDate?: () => Date;
|
3105
3082
|
};
|
3106
3083
|
}): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
|
@@ -3559,7 +3536,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
|
|
3559
3536
|
/**
|
3560
3537
|
Generate a unique ID for each message.
|
3561
3538
|
*/
|
3562
|
-
experimental_generateMessageId?:
|
3539
|
+
experimental_generateMessageId?: IdGenerator$1;
|
3563
3540
|
/**
|
3564
3541
|
When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
|
3565
3542
|
|
@@ -3630,7 +3607,7 @@ Internal. For test use only. May change without notice.
|
|
3630
3607
|
*/
|
3631
3608
|
_internal?: {
|
3632
3609
|
now?: () => number;
|
3633
|
-
generateId?:
|
3610
|
+
generateId?: IdGenerator$1;
|
3634
3611
|
currentDate?: () => Date;
|
3635
3612
|
};
|
3636
3613
|
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
|
@@ -3656,6 +3633,11 @@ interface GenerateImageResult {
|
|
3656
3633
|
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
3657
3634
|
*/
|
3658
3635
|
readonly responses: Array<ImageModelResponseMetadata>;
|
3636
|
+
/**
|
3637
|
+
* Provider-specific metadata. They are passed through from the provider to the AI SDK and enable provider-specific
|
3638
|
+
* results that can be fully encapsulated in the provider.
|
3639
|
+
*/
|
3640
|
+
readonly providerMetadata: ImageModelProviderMetadata;
|
3659
3641
|
}
|
3660
3642
|
|
3661
3643
|
/**
|
@@ -4688,4 +4670,4 @@ declare class RetryError extends AISDKError {
|
|
4688
4670
|
static isInstance(error: unknown): error is RetryError;
|
4689
4671
|
}
|
4690
4672
|
|
4691
|
-
export { AssistantContent,
|
4673
|
+
export { AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UIMessagePart, UseChatOptions, UseCompletionOptions, UserContent, UserModelMessage, appendClientMessage, appendResponseMessages, assistantModelMessageSchema, callChatApi, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, formatDataStreamPart, generateObject, generateText, getTextFromDataUrl, getToolInvocations, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, modelMessageSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, systemModelMessageSchema, tool, 
toolModelMessageSchema, updateToolCallResult, userModelMessageSchema, wrapLanguageModel };
|