ai 4.1.61 → 4.1.63

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -2,14 +2,12 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
  import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
  export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONParseError, TypeValidationError, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
+ import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { z } from 'zod';
  import { ServerResponse as ServerResponse$1 } from 'http';
- import { IOType } from 'node:child_process';
- import { Stream } from 'node:stream';
 
  /**
  Language model that is used by the AI SDK Core functions.
@@ -413,119 +411,6 @@ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, ab
  experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedManyResult<VALUE>>;
 
- /**
- The result of a `generateImage` call.
- It contains the images and additional information.
- */
- interface GenerateImageResult {
- /**
- The first image that was generated.
- */
- readonly image: GeneratedImage;
- /**
- The images that were generated.
- */
- readonly images: Array<GeneratedImage>;
- /**
- Warnings for the call, e.g. unsupported settings.
- */
- readonly warnings: Array<ImageGenerationWarning>;
- /**
- Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
- */
- readonly responses: Array<ImageModelResponseMetadata>;
- }
- interface GeneratedImage {
- /**
- Image as a base64 encoded string.
- */
- readonly base64: string;
- /**
- Image as a Uint8Array.
- */
- readonly uint8Array: Uint8Array;
- }
-
- /**
- Generates images using an image model.
-
- @param model - The image model to use.
- @param prompt - The prompt that should be used to generate the image.
- @param n - Number of images to generate. Default: 1.
- @param size - Size of the images to generate. Must have the format `{width}x{height}`.
- @param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
- @param seed - Seed for the image generation.
- @param providerOptions - Additional provider-specific options that are passed through to the provider
- as body parameters.
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
- @returns A result object that contains the generated images.
- */
- declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, _internal, }: {
- /**
- The image model to use.
- */
- model: ImageModelV1;
- /**
- The prompt that should be used to generate the image.
- */
- prompt: string;
- /**
- Number of images to generate.
- */
- n?: number;
- /**
- Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
- */
- size?: `${number}x${number}`;
- /**
- Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
- */
- aspectRatio?: `${number}:${number}`;
- /**
- Seed for the image generation. If not provided, the default seed will be used.
- */
- seed?: number;
- /**
- Additional provider-specific options that are passed through to the provider
- as body parameters.
-
- The outer record is keyed by the provider name, and the inner
- record is keyed by the provider-specific metadata key.
- ```ts
- {
- "openai": {
- "style": "vivid"
- }
- }
- ```
- */
- providerOptions?: Record<string, Record<string, JSONValue>>;
- /**
- Maximum number of retries per embedding model call. Set to 0 to disable retries.
-
- @default 2
- */
- maxRetries?: number;
- /**
- Abort signal.
- */
- abortSignal?: AbortSignal;
- /**
- Additional headers to include in the request.
- Only applicable for HTTP-based providers.
- */
- headers?: Record<string, string>;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- currentDate?: () => Date;
- };
- }): Promise<GenerateImageResult>;
-
  type CallSettings = {
  /**
  Maximum number of tokens to generate.
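For reference, the removed declarations define the call shape below. This diff does not show whether `generateImage` is re-exported under another name after 4.1.61, so the snippet mirrors the removed signature with a local declaration; the prompt and size are illustrative assumptions.

```ts
import type { ImageModelV1 } from '@ai-sdk/provider';

// Mirrors the signature removed above, trimmed to the fields used here.
declare function generateImage(options: {
  model: ImageModelV1;
  prompt: string;
  n?: number;
  size?: `${number}x${number}`;
  seed?: number;
}): Promise<{
  image: { base64: string; uint8Array: Uint8Array };
  images: Array<{ base64: string; uint8Array: Uint8Array }>;
}>;

async function renderIcon(model: ImageModelV1): Promise<string> {
  // Example prompt and size (assumptions); any `${number}x${number}` string is accepted.
  const { image } = await generateImage({
    model,
    prompt: 'pixel-art compass icon',
    size: '512x512',
  });
  return image.base64; // GeneratedImage also exposes `uint8Array`
}
```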
@@ -864,9 +749,10 @@ type CoreAssistantMessage = {
  };
  declare const coreAssistantMessageSchema: z.ZodType<CoreAssistantMessage>;
  /**
- Content of an assistant message. It can be a string or an array of text and tool call parts.
+ Content of an assistant message.
+ It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
  */
- type AssistantContent = string | Array<TextPart | ReasoningPart | RedactedReasoningPart | ToolCallPart>;
+ type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart | RedactedReasoningPart | ToolCallPart>;
  /**
  A tool message. It contains the result of one or more tool calls.
  */
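A sketch of the widened union, assuming `AssistantContent` is exported from the package root (as in the AI SDK docs) and that `FilePart` follows the SDK's message-part shape (`data` plus `mimeType`), which this excerpt does not show:

```ts
import type { AssistantContent } from 'ai';

// 4.1.61 allowed text, reasoning, redacted-reasoning, and tool-call parts;
// 4.1.63 also accepts file parts. Field shapes assumed from AI SDK conventions.
const content: AssistantContent = [
  { type: 'text', text: 'Generated the diagram below.' },
  { type: 'file', data: new Uint8Array([0x89, 0x50, 0x4e, 0x47]), mimeType: 'image/png' },
];
```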
@@ -916,827 +802,320 @@ type Prompt = {
  };
 
  /**
- The result of a `generateObject` call.
+ * A generated file.
  */
- interface GenerateObjectResult<OBJECT> {
- /**
- The generated object (typed according to the schema).
- */
- readonly object: OBJECT;
- /**
- The reason why the generation finished.
- */
- readonly finishReason: FinishReason;
+ interface GeneratedFile {
  /**
- The token usage of the generated text.
+ File as a base64 encoded string.
  */
- readonly usage: LanguageModelUsage;
+ readonly base64: string;
  /**
- Warnings from the model provider (e.g. unsupported settings).
+ File as a Uint8Array.
  */
- readonly warnings: CallWarning[] | undefined;
+ readonly uint8Array: Uint8Array;
  /**
- Additional request information.
+ MIME type of the file
  */
- readonly request: LanguageModelRequestMetadata;
+ readonly mimeType: string;
+ }
+
+ type ReasoningDetail = {
+ type: 'text';
+ text: string;
+ signature?: string;
+ } | {
+ type: 'redacted';
+ data: string;
+ };
+
+ type ToolParameters = z.ZodTypeAny | Schema<any>;
+ type inferParameters<PARAMETERS extends ToolParameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+ interface ToolExecutionOptions {
  /**
- Additional response information.
+ * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
  */
- readonly response: LanguageModelResponseMetadata & {
- /**
- Response body (available only for providers that use HTTP requests).
- */
- body?: unknown;
- };
- /**
- Logprobs for the completion.
- `undefined` if the mode does not support logprobs or if was not enabled.
-
- @deprecated Will become a provider extension in the future.
- */
- readonly logprobs: LogProbs | undefined;
+ toolCallId: string;
  /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
  */
- readonly providerMetadata: ProviderMetadata | undefined;
+ messages: CoreMessage[];
  /**
- @deprecated Use `providerMetadata` instead.
+ * An optional abort signal that indicates that the overall operation should be aborted.
  */
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
- /**
- Converts the object to a JSON response.
- The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
- */
- toJsonResponse(init?: ResponseInit): Response;
+ abortSignal?: AbortSignal;
  }
-
- /**
- A function that attempts to repair the raw output of the mode
- to enable JSON parsing.
-
- Should return the repaired text or null if the text cannot be repaired.
- */
- type RepairTextFunction = (options: {
- text: string;
- error: JSONParseError | TypeValidationError;
- }) => Promise<string | null>;
  /**
- Generate a structured, typed object for a given prompt and schema using a language model.
-
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+ A tool contains the description and the schema of the input that the tool expects.
+ This enables the language model to generate the input.
 
- @returns
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ The tool can also contain an optional execute function for the actual execution function of the tool.
  */
- declare function generateObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output?: 'object' | undefined;
- /**
- The language model to use.
- */
- model: LanguageModel;
+ type Tool<PARAMETERS extends ToolParameters = any, RESULT = any> = {
  /**
- The schema of the object that the model should generate.
- */
- schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
+ The schema of the input that the tool expects. The language model will use this to generate the input.
+ It is also used to validate the output of the language model.
+ Use descriptions to make the input understandable for the language model.
+ */
+ parameters: PARAMETERS;
  /**
- Optional name of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema name.
- */
- schemaName?: string;
+ An optional description of what the tool does.
+ Will be used by the language model to decide whether to use the tool.
+ Not used for provider-defined tools.
+ */
+ description?: string;
  /**
- Optional description of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema description.
- */
- schemaDescription?: string;
+ Optional conversion function that maps the tool result to multi-part tool content for LLMs.
+ */
+ experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
  /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
+ An async function that is called with the arguments from the tool call and produces a result.
+ If not provided, the tool will not be executed automatically.
+
+ @args is the input of the tool call.
+ @options.abortSignal is a signal that can be used to abort the tool call.
+ */
+ execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+ } & ({
  /**
- A function that attempts to repair the raw output of the mode
- to enable JSON parsing.
- */
- experimental_repairText?: RepairTextFunction;
+ Function tool.
+ */
+ type?: undefined | 'function';
+ } | {
  /**
- Optional telemetry configuration (experimental).
+ Provider-defined tool.
  */
- experimental_telemetry?: TelemetrySettings;
+ type: 'provider-defined';
  /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- @deprecated Use `providerOptions` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
+ The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
+ */
+ id: `${string}.${string}`;
  /**
- * Internal. For test use only. May change without notice.
+ The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
  */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- };
- }): Promise<GenerateObjectResult<OBJECT>>;
+ args: Record<string, unknown>;
+ });
  /**
- Generate an array with structured, typed elements for a given prompt and element schema using a language model.
-
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
-
- @return
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ * @deprecated Use `Tool` instead.
  */
- declare function generateObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output: 'array';
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The element schema of the array that the model should generate.
- */
- schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
- /**
- Optional name of the array that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema name.
- */
- schemaName?: string;
- /**
- Optional description of the array that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema description.
- */
- schemaDescription?: string;
- /**
- The mode to use for object generation.
+ type CoreTool<PARAMETERS extends ToolParameters = any, RESULT = any> = Tool<PARAMETERS, RESULT>;
+ /**
+ Helper function for inferring the execute args of a tool.
+ */
+ declare function tool<PARAMETERS extends ToolParameters, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
+ execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+ }): Tool<PARAMETERS, RESULT> & {
+ execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+ };
+ declare function tool<PARAMETERS extends ToolParameters, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
+ execute?: undefined;
+ }): Tool<PARAMETERS, RESULT> & {
+ execute: undefined;
+ };
 
- The schema is converted into a JSON schema and used in one of the following ways
+ /**
+ Create a union of the given object's values, and optionally specify which keys to get the values from.
 
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+ Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
 
- Please note that most providers do not support all modes.
+ @example
+ ```
+ // data.json
+ {
+ 'foo': 1,
+ 'bar': 2,
+ 'biz': 3
+ }
 
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
- /**
- A function that attempts to repair the raw output of the mode
- to enable JSON parsing.
- */
- experimental_repairText?: RepairTextFunction;
- /**
- Optional telemetry configuration (experimental).
- */
- experimental_telemetry?: TelemetrySettings;
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- @deprecated Use `providerOptions` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- };
- }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
- /**
- Generate a value from an enum (limited list of string values) using a language model.
+ // main.ts
+ import type {ValueOf} from 'type-fest';
+ import data = require('./data.json');
 
- This function does not stream the output.
+ export function getData(name: string): ValueOf<typeof data> {
+ return data[name];
+ }
 
- @return
- A result object that contains the generated value, the finish reason, the token usage, and additional information.
- */
- declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output: 'enum';
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The enum values that the model should use.
- */
- enum: Array<ENUM>;
- /**
- The mode to use for object generation.
+ export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
+ return data[name];
+ }
 
- The schema is converted into a JSON schema and used in one of the following ways
+ // file.ts
+ import {getData, onlyBar} from './main';
 
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+ getData('foo');
+ //=> 1
 
- Please note that most providers do not support all modes.
+ onlyBar('foo');
+ //=> TypeError ...
 
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
- /**
- A function that attempts to repair the raw output of the mode
- to enable JSON parsing.
- */
- experimental_repairText?: RepairTextFunction;
- /**
- Optional telemetry configuration (experimental).
- */
- experimental_telemetry?: TelemetrySettings;
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- @deprecated Use `providerOptions` instead.
+ onlyBar('bar');
+ //=> 2
+ ```
+ * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
  */
- experimental_providerMetadata?: ProviderMetadata;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- };
- }): Promise<GenerateObjectResult<ENUM>>;
- /**
- Generate JSON with any schema for a given prompt using a language model.
+ type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
 
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+ declare const JSONRPCMessageSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendShape<{
+ jsonrpc: z.ZodLiteral<"2.0">;
+ id: z.ZodUnion<[z.ZodString, z.ZodNumber]>;
+ }, {
+ method: z.ZodString;
+ params: z.ZodOptional<z.ZodObject<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, "passthrough", z.ZodTypeAny, z.objectOutputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough">, z.objectInputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough">>>;
+ }>, "strict", z.ZodTypeAny, {
+ id: string | number;
+ method: string;
+ jsonrpc: "2.0";
+ params?: z.objectOutputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough"> | undefined;
+ }, {
+ id: string | number;
+ method: string;
+ jsonrpc: "2.0";
+ params?: z.objectInputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough"> | undefined;
+ }>, z.ZodObject<z.objectUtil.extendShape<{
+ jsonrpc: z.ZodLiteral<"2.0">;
+ }, {
+ method: z.ZodString;
+ params: z.ZodOptional<z.ZodObject<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, "passthrough", z.ZodTypeAny, z.objectOutputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough">, z.objectInputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough">>>;
+ }>, "strict", z.ZodTypeAny, {
+ method: string;
+ jsonrpc: "2.0";
+ params?: z.objectOutputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough"> | undefined;
+ }, {
+ method: string;
+ jsonrpc: "2.0";
+ params?: z.objectInputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough"> | undefined;
+ }>, z.ZodObject<{
+ jsonrpc: z.ZodLiteral<"2.0">;
+ id: z.ZodUnion<[z.ZodString, z.ZodNumber]>;
+ result: z.ZodObject<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, "passthrough", z.ZodTypeAny, z.objectOutputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough">, z.objectInputType<{
+ _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
+ }, z.ZodTypeAny, "passthrough">>;
+ }, "strict", z.ZodTypeAny, {
+ result: {
+ _meta?: z.objectOutputType<{}, z.ZodTypeAny, "passthrough"> | undefined;
+ } & {
+ [k: string]: unknown;
+ };
+ id: string | number;
+ jsonrpc: "2.0";
+ }, {
+ result: {
+ _meta?: z.objectInputType<{}, z.ZodTypeAny, "passthrough"> | undefined;
+ } & {
+ [k: string]: unknown;
+ };
+ id: string | number;
+ jsonrpc: "2.0";
+ }>, z.ZodObject<{
+ jsonrpc: z.ZodLiteral<"2.0">;
+ id: z.ZodUnion<[z.ZodString, z.ZodNumber]>;
+ error: z.ZodObject<{
+ code: z.ZodNumber;
+ message: z.ZodString;
+ data: z.ZodOptional<z.ZodUnknown>;
+ }, "strip", z.ZodTypeAny, {
+ code: number;
+ message: string;
+ data?: unknown;
+ }, {
+ code: number;
+ message: string;
+ data?: unknown;
+ }>;
+ }, "strict", z.ZodTypeAny, {
+ error: {
+ code: number;
+ message: string;
+ data?: unknown;
+ };
+ id: string | number;
+ jsonrpc: "2.0";
+ }, {
+ error: {
+ code: number;
+ message: string;
+ data?: unknown;
+ };
+ id: string | number;
+ jsonrpc: "2.0";
+ }>]>;
+ type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
 
- @returns
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ /**
+ * Transport interface for MCP (Model Context Protocol) communication.
+ * Maps to the `Transport` interface in the MCP spec.
  */
- declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output: 'no-schema';
- /**
- The language model to use.
- */
- model: LanguageModel;
+ interface MCPTransport {
  /**
- The mode to use for object generation. Must be "json" for no-schema output.
- */
- mode?: 'json';
+ * Initialize and start the transport
+ */
+ start(): Promise<void>;
  /**
- A function that attempts to repair the raw output of the mode
- to enable JSON parsing.
- */
- experimental_repairText?: RepairTextFunction;
+ * Send a JSON-RPC message through the transport
+ * @param message The JSON-RPC message to send
+ */
+ send(message: JSONRPCMessage): Promise<void>;
  /**
- Optional telemetry configuration (experimental).
+ * Clean up and close the transport
  */
- experimental_telemetry?: TelemetrySettings;
+ close(): Promise<void>;
  /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
+ * Event handler for transport closure
+ */
+ onclose?: () => void;
  /**
- @deprecated Use `providerOptions` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
+ * Event handler for transport errors
+ */
+ onerror?: (error: Error) => void;
  /**
- * Internal. For test use only. May change without notice.
+ * Event handler for received messages
  */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- };
- }): Promise<GenerateObjectResult<JSONValue>>;
-
- type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
+ onmessage?: (message: JSONRPCMessage) => void;
+ }
+ interface SSEConfig {
+ type: 'sse';
+ url: string;
+ }
+ type MCPTransportConfig = SSEConfig;
 
- /**
- The result of a `streamObject` call that contains the partial object stream and additional information.
- */
- interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
- /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: Promise<CallWarning[] | undefined>;
- /**
- The token usage of the generated response. Resolved when the response is finished.
- */
- readonly usage: Promise<LanguageModelUsage>;
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
- */
- readonly providerMetadata: Promise<ProviderMetadata | undefined>;
- /**
- @deprecated Use `providerMetadata` instead.
- */
- readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
- /**
- Additional request information from the last step.
- */
- readonly request: Promise<LanguageModelRequestMetadata>;
- /**
- Additional response information.
- */
- readonly response: Promise<LanguageModelResponseMetadata>;
- /**
- The generated object (typed according to the schema). Resolved when the response is finished.
- */
- readonly object: Promise<RESULT>;
- /**
- Stream of partial objects. It gets more complete as the stream progresses.
-
- Note that the partial object is not validated.
- If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
- */
- readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
- /**
- * Stream over complete array elements. Only available if the output strategy is set to `array`.
- */
- readonly elementStream: ELEMENT_STREAM;
- /**
- Text stream of the JSON representation of the generated object. It contains text chunks.
- When the stream is finished, the object is valid JSON that can be parsed.
- */
- readonly textStream: AsyncIterableStream<string>;
- /**
- Stream of different types of events, including partial objects, errors, and finish events.
- Only errors that stop the stream, such as network errors, are thrown.
- */
- readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
- /**
- Writes text delta output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each text delta as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers, status code, and status text.
- */
- pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
- /**
- Creates a simple text stream response.
- The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
- Non-text-delta events are ignored.
-
- @param init Optional headers, status code, and status text.
- */
- toTextStreamResponse(init?: ResponseInit): Response;
- }
- type ObjectStreamPart<PARTIAL> = {
- type: 'object';
- object: PARTIAL;
- } | {
- type: 'text-delta';
- textDelta: string;
- } | {
- type: 'error';
- error: unknown;
- } | {
- type: 'finish';
- finishReason: FinishReason;
- logprobs?: LogProbs;
- usage: LanguageModelUsage;
- response: LanguageModelResponseMetadata;
- providerMetadata?: ProviderMetadata;
- };
-
- /**
- Callback that is set using the `onError` option.
-
- @param event - The event that is passed to the callback.
- */
- type StreamObjectOnErrorCallback = (event: {
- error: unknown;
- }) => Promise<void> | void;
- /**
- Callback that is set using the `onFinish` option.
-
- @param event - The event that is passed to the callback.
- */
- type StreamObjectOnFinishCallback<RESULT> = (event: {
- /**
- The token usage of the generated response.
- */
- usage: LanguageModelUsage;
- /**
- The generated object. Can be undefined if the final object does not match the schema.
- */
- object: RESULT | undefined;
- /**
- Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
- */
- error: unknown | undefined;
- /**
- Response metadata.
- */
- response: LanguageModelResponseMetadata;
- /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- warnings?: CallWarning[];
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerMetadata: ProviderMetadata | undefined;
- /**
- @deprecated Use `providerMetadata` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
- }) => Promise<void> | void;
- /**
- Generate a structured, typed object for a given prompt and schema using a language model.
-
- This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
- @return
- A result object for accessing the partial object stream and additional information.
- */
- declare function streamObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output?: 'object' | undefined;
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The schema of the object that the model should generate.
- */
- schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
- /**
- Optional name of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema name.
- */
- schemaName?: string;
- /**
- Optional description of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema description.
- */
- schemaDescription?: string;
- /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
- /**
- Optional telemetry configuration (experimental).
- */
- experimental_telemetry?: TelemetrySettings;
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- @deprecated Use `providerOptions` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
- /**
- Callback that is invoked when an error occurs during streaming.
- You can use it to log errors.
- The stream processing will pause until the callback promise is resolved.
- */
- onError?: StreamObjectOnErrorCallback;
- /**
- Callback that is called when the LLM response and the final object validation are finished.
- */
- onFinish?: StreamObjectOnFinishCallback<OBJECT>;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- now?: () => number;
- };
- }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
- /**
- Generate an array with structured, typed elements for a given prompt and element schema using a language model.
-
- This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
- @return
- A result object for accessing the partial object stream and additional information.
- */
- declare function streamObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output: 'array';
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The element schema of the array that the model should generate.
- */
- schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
- /**
- Optional name of the array that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema name.
- */
- schemaName?: string;
- /**
- Optional description of the array that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema description.
- */
- schemaDescription?: string;
- /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
- /**
- Optional telemetry configuration (experimental).
- */
- experimental_telemetry?: TelemetrySettings;
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- @deprecated Use `providerOptions` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
- /**
- Callback that is invoked when an error occurs during streaming.
- You can use it to log errors.
- The stream processing will pause until the callback promise is resolved.
- */
- onError?: StreamObjectOnErrorCallback;
- /**
- Callback that is called when the LLM response and the final object validation are finished.
- */
- onFinish?: StreamObjectOnFinishCallback<Array<ELEMENT>>;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- now?: () => number;
- };
- }): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
- /**
- Generate JSON with any schema for a given prompt using a language model.
-
- This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
- @return
- A result object for accessing the partial object stream and additional information.
- */
- declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
- output: 'no-schema';
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The mode to use for object generation. Must be "json" for no-schema output.
- */
- mode?: 'json';
- /**
- Optional telemetry configuration (experimental).
- */
- experimental_telemetry?: TelemetrySettings;
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- @deprecated Use `providerOptions` instead.
- */
- experimental_providerMetadata?: ProviderMetadata;
- /**
- Callback that is invoked when an error occurs during streaming.
- You can use it to log errors.
- The stream processing will pause until the callback promise is resolved.
- */
- onError?: StreamObjectOnErrorCallback;
- /**
- Callback that is called when the LLM response and the final object validation are finished.
- */
- onFinish?: StreamObjectOnFinishCallback<JSONValue>;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- generateId?: () => string;
- currentDate?: () => Date;
- now?: () => number;
- };
- }): StreamObjectResult<JSONValue, JSONValue, never>;
-
- type ReasoningDetail = {
- type: 'text';
- text: string;
- signature?: string;
- } | {
- type: 'redacted';
- data: string;
- };
-
- type ToolParameters = z.ZodTypeAny | Schema<any>;
- type inferParameters<PARAMETERS extends ToolParameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
- interface ToolExecutionOptions {
- /**
- * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
- */
- toolCallId: string;
- /**
- * Messages that were sent to the language model to initiate the response that contained the tool call.
- * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
- */
- messages: CoreMessage[];
- /**
- * An optional abort signal that indicates that the overall operation should be aborted.
- */
- abortSignal?: AbortSignal;
- }
- /**
- A tool contains the description and the schema of the input that the tool expects.
- This enables the language model to generate the input.
-
- The tool can also contain an optional execute function for the actual execution function of the tool.
- */
- type Tool<PARAMETERS extends ToolParameters = any, RESULT = any> = {
- /**
- The schema of the input that the tool expects. The language model will use this to generate the input.
- It is also used to validate the output of the language model.
- Use descriptions to make the input understandable for the language model.
- */
- parameters: PARAMETERS;
- /**
- An optional description of what the tool does.
- Will be used by the language model to decide whether to use the tool.
- Not used for provider-defined tools.
- */
- description?: string;
- /**
- Optional conversion function that maps the tool result to multi-part tool content for LLMs.
- */
- experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
- /**
- An async function that is called with the arguments from the tool call and produces a result.
- If not provided, the tool will not be executed automatically.
-
- @args is the input of the tool call.
- @options.abortSignal is a signal that can be used to abort the tool call.
- */
- execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
- } & ({
- /**
- Function tool.
- */
- type?: undefined | 'function';
- } | {
- /**
- Provider-defined tool.
- */
- type: 'provider-defined';
- /**
- The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
- */
- id: `${string}.${string}`;
- /**
- The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
- */
- args: Record<string, unknown>;
- });
- /**
- * @deprecated Use `Tool` instead.
- */
- type CoreTool<PARAMETERS extends ToolParameters = any, RESULT = any> = Tool<PARAMETERS, RESULT>;
- /**
- Helper function for inferring the execute args of a tool.
- */
- declare function tool<PARAMETERS extends ToolParameters, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
- execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
- }): Tool<PARAMETERS, RESULT> & {
- execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
- };
- declare function tool<PARAMETERS extends ToolParameters, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
- execute?: undefined;
- }): Tool<PARAMETERS, RESULT> & {
- execute: undefined;
- };
-
- /**
- Create a union of the given object's values, and optionally specify which keys to get the values from.
-
- Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
-
- @example
- ```
- // data.json
- {
- 'foo': 1,
- 'bar': 2,
- 'biz': 3
- }
-
- // main.ts
- import type {ValueOf} from 'type-fest';
- import data = require('./data.json');
-
- export function getData(name: string): ValueOf<typeof data> {
- return data[name];
- }
-
- export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
- return data[name];
- }
-
- // file.ts
- import {getData, onlyBar} from './main';
-
- getData('foo');
- //=> 1
-
- onlyBar('foo');
- //=> TypeError ...
-
- onlyBar('bar');
- //=> 2
- ```
- * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
- */
- type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
-
- type ToolSchemas = Record<string, {
- parameters: ToolParameters;
- }> | 'automatic' | undefined;
- type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
- parameters: ToolParameters;
- }> ? {
- [K in keyof TOOL_SCHEMAS]: Tool<TOOL_SCHEMAS[K]['parameters'], CallToolResult> & {
- execute: (args: inferParameters<TOOL_SCHEMAS[K]['parameters']>, options: ToolExecutionOptions) => PromiseLike<CallToolResult>;
+ type ToolSchemas = Record<string, {
+ parameters: ToolParameters;
+ }> | 'automatic' | undefined;
+ type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
+ parameters: ToolParameters;
+ }> ? {
+ [K in keyof TOOL_SCHEMAS]: Tool<TOOL_SCHEMAS[K]['parameters'], CallToolResult> & {
+ execute: (args: inferParameters<TOOL_SCHEMAS[K]['parameters']>, options: ToolExecutionOptions) => PromiseLike<CallToolResult>;
  };
  } : {
  [k: string]: Tool<z.ZodUnknown, CallToolResult> & {
  execute: (args: unknown, options: ToolExecutionOptions) => PromiseLike<CallToolResult>;
  };
  };
- interface McpStdioServerConfig {
- command: string;
- args?: string[];
- env?: Record<string, string>;
- stderr?: IOType | Stream | number;
- cwd?: string;
- type: 'stdio';
- }
- interface McpSSEServerConfig {
- type: 'sse';
- url: string;
- }
- type TransportConfig = McpStdioServerConfig | McpSSEServerConfig;
  declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendShape<{
  _meta: z.ZodOptional<z.ZodObject<{}, "passthrough", z.ZodTypeAny, z.objectOutputType<{}, z.ZodTypeAny, "passthrough">, z.objectInputType<{}, z.ZodTypeAny, "passthrough">>>;
  }, {
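The `tool()` helper and `ToolExecutionOptions` added in this hunk are easiest to read through a usage sketch. A minimal example, assuming the package root re-exports `tool` (as the AI SDK docs describe) and using a placeholder endpoint:

```ts
import { tool } from 'ai';
import { z } from 'zod';

// `parameters` drives type inference: inferParameters<typeof parameters>
// makes `args` in execute() resolve to { city: string }. The second
// argument is the ToolExecutionOptions interface declared above.
const getWeather = tool({
  description: 'Get the current temperature in a city.',
  parameters: z.object({ city: z.string().describe('City name') }),
  execute: async ({ city }, { toolCallId, abortSignal }) => {
    console.log('executing tool call', toolCallId);
    // Placeholder URL (assumption); abortSignal lets the caller cancel the run.
    const res = await fetch(
      `https://weather.example.com/v1?city=${encodeURIComponent(city)}`,
      { signal: abortSignal },
    );
    return (await res.json()) as { temperatureC: number };
  },
});
```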
@@ -2451,736 +1830,1545 @@ declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendS
2451
1830
  }>, z.ZodTypeAny, "passthrough">>]>;
2452
1831
  type CallToolResult = z.infer<typeof CallToolResultSchema>;
2453
1832
 
2454
- interface MCPClientConfig {
2455
- /** Transport configuration for connecting to the MCP server */
2456
- transport: TransportConfig;
2457
- /** Optional callback for uncaught errors */
2458
- onUncaughtError?: (error: unknown) => void;
2459
- /** Optional client name, defaults to 'ai-sdk-mcp-client' */
2460
- name?: string;
1833
+ interface MCPClientConfig {
1834
+ /** Transport configuration for connecting to the MCP server */
1835
+ transport: MCPTransportConfig | MCPTransport;
1836
+ /** Optional callback for uncaught errors */
1837
+ onUncaughtError?: (error: unknown) => void;
1838
+ /** Optional client name, defaults to 'ai-sdk-mcp-client' */
1839
+ name?: string;
1840
+ }
1841
+ declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
1842
+ /**
1843
+ * A lightweight MCP Client implementation
1844
+ *
1845
+ * The primary purpose of this client is tool conversion between MCP<>AI SDK
1846
+ * but can later be extended to support other MCP features
1847
+ *
1848
+ * Tool parameters are automatically inferred from the server's JSON schema
1849
+ * if not explicitly provided in the tools configuration
1850
+ *
1851
+ * Not supported:
1852
+ * - Client options (e.g. sampling, roots) as they are not needed for tool conversion
1853
+ * - Accepting notifications
1854
+ */
1855
+ declare class MCPClient {
1856
+ private transport;
1857
+ private onUncaughtError?;
1858
+ private clientInfo;
1859
+ private requestMessageId;
1860
+ private responseHandlers;
1861
+ private serverCapabilities;
1862
+ private isClosed;
1863
+ constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
1864
+ init(): Promise<this>;
1865
+ close(): Promise<void>;
1866
+ private request;
1867
+ private listTools;
1868
+ private callTool;
1869
+ private notification;
1870
+ /**
1871
+ * Returns a set of AI SDK tools from the MCP server
1872
+ * @returns A record of tool names to their implementations
1873
+ */
1874
+ tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>({ schemas, }?: {
1875
+ schemas?: TOOL_SCHEMAS;
1876
+ }): Promise<McpToolSet<TOOL_SCHEMAS>>;
1877
+ private onClose;
1878
+ private onError;
1879
+ private onResponse;
1880
+ }
1881
+
1882
+ type ToolSet = Record<string, Tool>;
1883
+
1884
+ type ToolCallUnion<TOOLS extends ToolSet> = ValueOf<{
1885
+ [NAME in keyof TOOLS]: {
1886
+ type: 'tool-call';
1887
+ toolCallId: string;
1888
+ toolName: NAME & string;
1889
+ args: inferParameters<TOOLS[NAME]['parameters']>;
1890
+ };
1891
+ }>;
1892
+ /**
1893
+ * @deprecated Use `ToolCallUnion` instead.
1894
+ */
1895
+ type CoreToolCallUnion<TOOLS extends ToolSet> = ToolCallUnion<ToolSet>;
1896
+ type ToolCallArray<TOOLS extends ToolSet> = Array<ToolCallUnion<TOOLS>>;
1897
+
1898
+ type ToToolsWithExecute<TOOLS extends ToolSet> = {
1899
+ [K in keyof TOOLS as TOOLS[K] extends {
1900
+ execute: any;
1901
+ } ? K : never]: TOOLS[K];
1902
+ };
1903
+ type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
1904
+ [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
1905
+ };
1906
+ type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
1907
+ [NAME in keyof TOOLS]: {
1908
+ type: 'tool-result';
1909
+ toolCallId: string;
1910
+ toolName: NAME & string;
1911
+ args: inferParameters<TOOLS[NAME]['parameters']>;
1912
+ result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
1913
+ };
1914
+ }>;
1915
+ type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
1916
+ /**
1917
+ * @deprecated Use `ToolResultUnion` instead.
1918
+ */
1919
+ type CoreToolResultUnion<TOOLS extends ToolSet> = ToolResultUnion<TOOLS>;
1920
+ type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;
1921
+
1922
+ /**
1923
+ A message that was generated during the generation process.
1924
+ It can be either an assistant message or a tool message.
1925
+ */
1926
+ type ResponseMessage = (CoreAssistantMessage | CoreToolMessage) & {
1927
+ /**
1928
+ Message ID generated by the AI SDK.
1929
+ */
1930
+ id: string;
1931
+ };
1932
+ /**
1933
+ * The result of a single step in the generation process.
1934
+ */
1935
+ type StepResult<TOOLS extends ToolSet> = {
1936
+ /**
1937
+ The generated text.
1938
+ */
1939
+ readonly text: string;
1940
+ /**
1941
+ The reasoning that was generated during the generation.
1942
+ */
1943
+ readonly reasoning: string | undefined;
1944
+ readonly reasoningDetails: Array<ReasoningDetail>;
1945
+ /**
1946
+ The files that were generated during the generation.
1947
+ */
1948
+ readonly files: GeneratedFile[];
1949
+ /**
1950
+ The sources that were used to generate the text.
1951
+ */
1952
+ readonly sources: Source[];
1953
+ /**
1954
+ The tool calls that were made during the generation.
1955
+ */
1956
+ readonly toolCalls: ToolCallArray<TOOLS>;
1957
+ /**
1958
+ The results of the tool calls.
1959
+ */
1960
+ readonly toolResults: ToolResultArray<TOOLS>;
1961
+ /**
1962
+ The reason why the generation finished.
1963
+ */
1964
+ readonly finishReason: FinishReason;
1965
+ /**
1966
+ The token usage of the generated text.
1967
+ */
1968
+ readonly usage: LanguageModelUsage;
1969
+ /**
1970
+ Warnings from the model provider (e.g. unsupported settings).
1971
+ */
1972
+ readonly warnings: CallWarning[] | undefined;
1973
+ /**
1974
+ Logprobs for the completion.
1975
+ `undefined` if the model does not support logprobs or if it was not enabled.
1976
+ */
1977
+ readonly logprobs: LogProbs | undefined;
1978
+ /**
1979
+ Additional request information.
1980
+ */
1981
+ readonly request: LanguageModelRequestMetadata;
1982
+ /**
1983
+ Additional response information.
1984
+ */
1985
+ readonly response: LanguageModelResponseMetadata & {
1986
+ /**
1987
+ The response messages that were generated during the call.
1988
+ Response messages can be either assistant messages or tool messages.
1989
+ They contain a generated id.
1990
+ */
1991
+ readonly messages: Array<ResponseMessage>;
1992
+ /**
1993
+ Response body (available only for providers that use HTTP requests).
1994
+ */
1995
+ body?: unknown;
1996
+ };
1997
+ /**
1998
+ Additional provider-specific metadata. They are passed through
1999
+ from the provider to the AI SDK and enable provider-specific
2000
+ results that can be fully encapsulated in the provider.
2001
+ */
2002
+ readonly providerMetadata: ProviderMetadata | undefined;
2003
+ /**
2004
+ @deprecated Use `providerMetadata` instead.
2005
+ */
2006
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
2007
+ /**
2008
+ The type of step that this result is for. The first step is always
2009
+ an "initial" step, and subsequent steps are either "continue" steps
2010
+ or "tool-result" steps.
2011
+ */
2012
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
2013
+ /**
2014
+ True when there will be a continuation step with a continuation text.
2015
+ */
2016
+ readonly isContinued: boolean;
2017
+ };
2018
+
2019
+ /**
2020
+ The result of a `generateText` call.
2021
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
2022
+ */
2023
+ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2024
+ /**
2025
+ The generated text.
2026
+ */
2027
+ readonly text: string;
2028
+ /**
2029
+ The reasoning text that the model has generated. Can be undefined if the model
2030
+ has only generated text.
2031
+ */
2032
+ readonly reasoning: string | undefined;
2033
+ /**
2034
+ The files that were generated. Empty array if no files were generated.
2035
+ */
2036
+ readonly files: Array<GeneratedFile>;
2037
+ /**
2038
+ The full reasoning that the model has generated.
2039
+ */
2040
+ readonly reasoningDetails: Array<ReasoningDetail>;
2041
+ /**
2042
+ Sources that have been used as input to generate the response.
2043
+ For multi-step generation, the sources are accumulated from all steps.
2044
+ */
2045
+ readonly sources: Source[];
2046
+ /**
2047
+ The generated structured output. It uses the `experimental_output` specification.
2048
+ */
2049
+ readonly experimental_output: OUTPUT;
2050
+ /**
2051
+ The tool calls that were made during the generation.
2052
+ */
2053
+ readonly toolCalls: ToolCallArray<TOOLS>;
2054
+ /**
2055
+ The results of the tool calls.
2056
+ */
2057
+ readonly toolResults: ToolResultArray<TOOLS>;
2058
+ /**
2059
+ The reason why the generation finished.
2060
+ */
2061
+ readonly finishReason: FinishReason;
2062
+ /**
2063
+ The token usage of the generated text.
2064
+ */
2065
+ readonly usage: LanguageModelUsage;
2066
+ /**
2067
+ Warnings from the model provider (e.g. unsupported settings).
2068
+ */
2069
+ readonly warnings: CallWarning[] | undefined;
2070
+ /**
2071
+ Details for all steps.
2072
+ You can use this to get information about intermediate steps,
2073
+ such as the tool calls or the response headers.
2074
+ */
2075
+ readonly steps: Array<StepResult<TOOLS>>;
2076
+ /**
2077
+ Additional request information.
2078
+ */
2079
+ readonly request: LanguageModelRequestMetadata;
2080
+ /**
2081
+ Additional response information.
2082
+ */
2083
+ readonly response: LanguageModelResponseMetadata & {
2084
+ /**
2085
+ The response messages that were generated during the call. It consists of an assistant message,
2086
+ potentially containing tool calls.
2087
+
2088
+ When there are tool results, there is an additional tool message with the tool results that are available.
2089
+ If there are tools that do not have execute functions, they are not included in the tool results and
2090
+ need to be added separately.
2091
+ */
2092
+ messages: Array<ResponseMessage>;
2093
+ /**
2094
+ Response body (available only for providers that use HTTP requests).
2095
+ */
2096
+ body?: unknown;
2097
+ };
2098
+ /**
2099
+ Logprobs for the completion.
2100
+ `undefined` if the model does not support logprobs or if it was not enabled.
2101
+
2102
+ @deprecated Will become a provider extension in the future.
2103
+ */
2104
+ readonly logprobs: LogProbs | undefined;
2105
+ /**
2106
+ Additional provider-specific metadata. They are passed through
2107
+ from the provider to the AI SDK and enable provider-specific
2108
+ results that can be fully encapsulated in the provider.
2109
+ */
2110
+ readonly providerMetadata: ProviderMetadata | undefined;
2111
+ /**
2112
+ @deprecated Use `providerMetadata` instead.
2113
+ */
2114
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
2115
+ }
2116
+
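A sketch of reading the result fields above after a call (the model instance is assumed to come from a provider package):

```ts
import { generateText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed provider model instance

const result = await generateText({
  model,
  prompt: 'Summarize the AI SDK in one sentence.',
});

console.log(result.text);
console.log(result.finishReason);      // e.g. 'stop'
console.log(result.usage.totalTokens);
// Response messages (assistant + tool messages) for chat persistence:
console.log(result.response.messages);
```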
2117
+ interface Output<OUTPUT, PARTIAL> {
2118
+ readonly type: 'object' | 'text';
2119
+ injectIntoSystemPrompt(options: {
2120
+ system: string | undefined;
2121
+ model: LanguageModel;
2122
+ }): string | undefined;
2123
+ responseFormat: (options: {
2124
+ model: LanguageModel;
2125
+ }) => LanguageModelV1CallOptions['responseFormat'];
2126
+ parsePartial(options: {
2127
+ text: string;
2128
+ }): {
2129
+ partial: PARTIAL;
2130
+ } | undefined;
2131
+ parseOutput(options: {
2132
+ text: string;
2133
+ }, context: {
2134
+ response: LanguageModelResponseMetadata;
2135
+ usage: LanguageModelUsage;
2136
+ }): OUTPUT;
2137
+ }
2138
+ declare const text: () => Output<string, string>;
2139
+ declare const object: <OUTPUT>({ schema: inputSchema, }: {
2140
+ schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
2141
+ }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
2142
+
2143
+ type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
2144
+ declare const output_object: typeof object;
2145
+ declare const output_text: typeof text;
2146
+ declare namespace output {
2147
+ export {
2148
+ output_Output as Output,
2149
+ output_object as object,
2150
+ output_text as text,
2151
+ };
2152
+ }
2153
+
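A hedged sketch of the `Output` specification in use, assuming the `output` namespace is re-exported from the package entry point as `Output`:

```ts
import { generateText, Output, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumed provider model instance

const { experimental_output } = await generateText({
  model,
  prompt: 'Extract city and temperature from: "It is 18C in Berlin."',
  experimental_output: Output.object({
    schema: z.object({ city: z.string(), temperature: z.number() }),
  }),
});
// `experimental_output` is typed as { city: string; temperature: number }
```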
2154
+ declare const symbol$f: unique symbol;
2155
+ declare class InvalidToolArgumentsError extends AISDKError {
2156
+ private readonly [symbol$f];
2157
+ readonly toolName: string;
2158
+ readonly toolArgs: string;
2159
+ constructor({ toolArgs, toolName, cause, message, }: {
2160
+ message?: string;
2161
+ toolArgs: string;
2162
+ toolName: string;
2163
+ cause: unknown;
2164
+ });
2165
+ static isInstance(error: unknown): error is InvalidToolArgumentsError;
2166
+ }
2167
+
2168
+ declare const symbol$e: unique symbol;
2169
+ declare class NoSuchToolError extends AISDKError {
2170
+ private readonly [symbol$e];
2171
+ readonly toolName: string;
2172
+ readonly availableTools: string[] | undefined;
2173
+ constructor({ toolName, availableTools, message, }: {
2174
+ toolName: string;
2175
+ availableTools?: string[] | undefined;
2176
+ message?: string;
2177
+ });
2178
+ static isInstance(error: unknown): error is NoSuchToolError;
2179
+ }
2180
+
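The `isInstance` guards above enable error handling that stays reliable across package versions, unlike plain `instanceof`; a short sketch:

```ts
import {
  generateText,
  InvalidToolArgumentsError,
  NoSuchToolError,
  type LanguageModel,
} from 'ai';

declare const model: LanguageModel; // assumed provider model instance

try {
  await generateText({ model, prompt: 'Call a tool.' /* , tools: { ... } */ });
} catch (error) {
  if (NoSuchToolError.isInstance(error)) {
    console.error('unknown tool:', error.toolName, 'available:', error.availableTools);
  } else if (InvalidToolArgumentsError.isInstance(error)) {
    console.error('invalid args for', error.toolName, ':', error.toolArgs);
  } else {
    throw error;
  }
}
```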
2181
+ /**
2182
+ * Appends a client message to the messages array.
2183
+ * If the last message in the array has the same id as the new message, it will be replaced.
2184
+ * Otherwise, the new message will be appended.
2185
+ */
2186
+ declare function appendClientMessage({ messages, message, }: {
2187
+ messages: Message[];
2188
+ message: Message;
2189
+ }): Message[];
2190
+
2191
+ /**
2192
+ * Appends the ResponseMessage[] from the response to a Message[] (for useChat).
2193
+ * The messages are converted to Messages before being appended.
2194
+ * Timestamps are generated for the new messages.
2195
+ *
2196
+ * @returns A new Message[] with the response messages appended.
2197
+ */
2198
+ declare function appendResponseMessages({ messages, responseMessages, _internal: { currentDate }, }: {
2199
+ messages: Message[];
2200
+ responseMessages: ResponseMessage[];
2201
+ /**
2202
+ Internal. For test use only. May change without notice.
2203
+ */
2204
+ _internal?: {
2205
+ currentDate?: () => Date;
2206
+ };
2207
+ }): Message[];
2208
+
2209
+ /**
2210
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
2211
+ with the AI core functions (e.g. `streamText`).
2212
+ */
2213
+ declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<Message, 'id'>>, options?: {
2214
+ tools?: TOOLS;
2215
+ }): CoreMessage[];
2216
+
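The three message helpers above compose naturally in a chat endpoint; a hedged sketch (the storage functions and model are illustrative):

```ts
import {
  appendClientMessage,
  appendResponseMessages,
  convertToCoreMessages,
  generateText,
  type LanguageModel,
  type Message,
} from 'ai';

declare const model: LanguageModel;                                  // assumed
declare function loadChat(id: string): Promise<Message[]>;           // illustrative storage
declare function saveChat(id: string, m: Message[]): Promise<void>;  // illustrative storage

export async function handleChat(chatId: string, message: Message) {
  // Replace-or-append the incoming client message by id.
  const messages = appendClientMessage({
    messages: await loadChat(chatId),
    message,
  });

  const result = await generateText({
    model,
    messages: convertToCoreMessages(messages),
  });

  // Convert response messages to UI messages (with timestamps) and persist.
  await saveChat(
    chatId,
    appendResponseMessages({
      messages,
      responseMessages: result.response.messages,
    }),
  );
}
```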
2217
+ /**
2218
+ * A function that attempts to repair a tool call that failed to parse.
2219
+ *
2220
+ * It receives the error and the context as arguments and returns the repair
2221
+ * tool call JSON as text.
2222
+ *
2223
+ * @param options.system - The system prompt.
2224
+ * @param options.messages - The messages in the current generation step.
2225
+ * @param options.toolCall - The tool call that failed to parse.
2226
+ * @param options.tools - The tools that are available.
2227
+ * @param options.parameterSchema - A function that returns the JSON Schema for a tool.
2228
+ * @param options.error - The error that occurred while parsing the tool call.
2229
+ */
2230
+ type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
2231
+ system: string | undefined;
2232
+ messages: CoreMessage[];
2233
+ toolCall: LanguageModelV1FunctionToolCall;
2234
+ tools: TOOLS;
2235
+ parameterSchema: (options: {
2236
+ toolName: string;
2237
+ }) => JSONSchema7;
2238
+ error: NoSuchToolError | InvalidToolArgumentsError;
2239
+ }) => Promise<LanguageModelV1FunctionToolCall | null>;
2240
+
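A hedged sketch of one repair strategy for the function type above: give up on unknown tools, and re-generate the arguments against the tool's JSON schema via `generateObject` (the repair model and the public visibility of these type exports are assumptions):

```ts
import {
  generateObject,
  jsonSchema,
  NoSuchToolError,
  type LanguageModel,
  type ToolCallRepairFunction,
  type ToolSet,
} from 'ai';

declare const repairModel: LanguageModel; // assumed, e.g. a stronger model

export const repairToolCall: ToolCallRepairFunction<ToolSet> = async ({
  toolCall,
  parameterSchema,
  error,
}) => {
  // Unknown tool names cannot be repaired by fixing arguments.
  if (NoSuchToolError.isInstance(error)) return null;

  // Re-generate arguments that conform to the tool's JSON schema.
  const { object: repairedArgs } = await generateObject({
    model: repairModel,
    schema: jsonSchema(parameterSchema({ toolName: toolCall.toolName })),
    prompt: `Fix these arguments for tool "${toolCall.toolName}": ${toolCall.args}`,
  });

  return { ...toolCall, args: JSON.stringify(repairedArgs) };
};
// Pass via `experimental_repairToolCall` to generateText / streamText.
```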
2241
+ /**
2242
+ Callback that is set using the `onStepFinish` option.
2243
+
2244
+ @param stepResult - The result of the step.
2245
+ */
2246
+ type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
2247
+ /**
2248
+ Generate a text and call tools for a given prompt using a language model.
2249
+
2250
+ This function does not stream the output. If you want to stream the output, use `streamText` instead.
2251
+
2252
+ @param model - The language model to use.
2253
+
2254
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2255
+ @param toolChoice - The tool choice strategy. Default: 'auto'.
2256
+
2257
+ @param system - A system message that will be part of the prompt.
2258
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2259
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2260
+
2261
+ @param maxTokens - Maximum number of tokens to generate.
2262
+ @param temperature - Temperature setting.
2263
+ The value is passed through to the provider. The range depends on the provider and model.
2264
+ It is recommended to set either `temperature` or `topP`, but not both.
2265
+ @param topP - Nucleus sampling.
2266
+ The value is passed through to the provider. The range depends on the provider and model.
2267
+ It is recommended to set either `temperature` or `topP`, but not both.
2268
+ @param topK - Only sample from the top K options for each subsequent token.
2269
+ Used to remove "long tail" low probability responses.
2270
+ Recommended for advanced use cases only. You usually only need to use temperature.
2271
+ @param presencePenalty - Presence penalty setting.
2272
+ It affects how likely the model is to repeat information that is already in the prompt.
2273
+ The value is passed through to the provider. The range depends on the provider and model.
2274
+ @param frequencyPenalty - Frequency penalty setting.
2275
+ It affects how likely the model is to repeatedly use the same words or phrases.
2276
+ The value is passed through to the provider. The range depends on the provider and model.
2277
+ @param stopSequences - Stop sequences.
2278
+ If set, the model will stop generating text when one of the stop sequences is generated.
2279
+ @param seed - The seed (integer) to use for random sampling.
2280
+ If set and supported by the model, calls will generate deterministic results.
2281
+
2282
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2283
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2284
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2285
+
2286
+ @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
2287
+ @param experimental_generateMessageId - Generate a unique ID for each message.
2288
+
2289
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2290
+
2291
+ @returns
2292
+ A result object that contains the generated text, the results of the tool calls, and additional information.
2293
+ */
2294
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
2295
+ /**
2296
+ The language model to use.
2297
+ */
2298
+ model: LanguageModel;
2299
+ /**
2300
+ The tools that the model can call. The model needs to support calling tools.
2301
+ */
2302
+ tools?: TOOLS;
2303
+ /**
2304
+ The tool choice strategy. Default: 'auto'.
2305
+ */
2306
+ toolChoice?: ToolChoice<TOOLS>;
2307
+ /**
2308
+ Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
2309
+
2310
+ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
2311
+
2312
+ By default, it's set to 1, which means that only a single LLM call is made.
2313
+ */
2314
+ maxSteps?: number;
2315
+ /**
2316
+ Generate a unique ID for each message.
2317
+ */
2318
+ experimental_generateMessageId?: IDGenerator;
2319
+ /**
2320
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
2321
+
2322
+ By default, it's set to false.
2323
+ */
2324
+ experimental_continueSteps?: boolean;
2325
+ /**
2326
+ Optional telemetry configuration (experimental).
2327
+ */
2328
+ experimental_telemetry?: TelemetrySettings;
2329
+ /**
2330
+ Additional provider-specific options. They are passed through
2331
+ to the provider from the AI SDK and enable provider-specific
2332
+ functionality that can be fully encapsulated in the provider.
2333
+ */
2334
+ providerOptions?: ProviderOptions;
2335
+ /**
2336
+ @deprecated Use `providerOptions` instead.
2337
+ */
2338
+ experimental_providerMetadata?: ProviderMetadata;
2339
+ /**
2340
+ Limits the tools that are available for the model to call without
2341
+ changing the tool call and result types in the result.
2342
+ */
2343
+ experimental_activeTools?: Array<keyof TOOLS>;
2344
+ /**
2345
+ Optional specification for parsing structured outputs from the LLM response.
2346
+ */
2347
+ experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
2348
+ /**
2349
+ A function that attempts to repair a tool call that failed to parse.
2350
+ */
2351
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2352
+ /**
2353
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
2354
+ */
2355
+ onStepFinish?: GenerateTextOnStepFinishCallback<TOOLS>;
2356
+ /**
2357
+ * Internal. For test use only. May change without notice.
2358
+ */
2359
+ _internal?: {
2360
+ generateId?: IDGenerator;
2361
+ currentDate?: () => Date;
2362
+ };
2363
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2364
+
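A sketch of the multi-step tool loop described above (model and tool are illustrative):

```ts
import { generateText, tool, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumed provider model instance

const { text, steps } = await generateText({
  model,
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, forecast: 'sunny' }),
    }),
  },
  maxSteps: 3, // allow follow-up calls after tool results
  prompt: 'What is the weather in Berlin?',
  onStepFinish(step) {
    console.log(step.stepType, step.finishReason, step.toolCalls.length);
  },
});

console.log(text, `(${steps.length} steps)`);
```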
2365
+ /**
2366
+ * A stream wrapper to send custom JSON-encoded data back to the client.
2367
+ *
2368
+ * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
2369
+ */
2370
+ declare class StreamData {
2371
+ private encoder;
2372
+ private controller;
2373
+ stream: ReadableStream<Uint8Array>;
2374
+ private isClosed;
2375
+ private warningTimeout;
2376
+ constructor();
2377
+ close(): Promise<void>;
2378
+ append(value: JSONValue$1): void;
2379
+ appendMessageAnnotation(value: JSONValue$1): void;
2380
+ }
2381
+
2382
+ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
2383
+
2384
+ type DataStreamOptions = {
2385
+ /**
2386
+ * Send usage parts to the client.
2387
+ * Defaults to true.
2388
+ */
2389
+ sendUsage?: boolean;
2390
+ /**
2391
+ * Send reasoning parts to the client.
2392
+ * Defaults to false.
2393
+ */
2394
+ sendReasoning?: boolean;
2395
+ /**
2396
+ * Send source parts to the client.
2397
+ * Defaults to false.
2398
+ */
2399
+ sendSources?: boolean;
2400
+ /**
2401
+ * Send the finish event to the client.
2402
+ * Set to false if you are using additional streamText calls
2403
+ * that send additional data.
2404
+ * Defaults to true.
2405
+ */
2406
+ experimental_sendFinish?: boolean;
2407
+ /**
2408
+ * Send the message start event to the client.
2409
+ * Set to false if you are using additional streamText calls
2410
+ * and the message start event has already been sent.
2411
+ * Defaults to true.
2412
+ *
2413
+ * Note: this setting is currently not used. However, if you are
2414
+ * making additional streamText calls that send more data, you
2415
+ * should already set it to false to prevent the message start
2416
+ * event from being sent multiple times.
2417
+ */
2418
+ experimental_sendStart?: boolean;
2419
+ };
2420
+ /**
2421
+ A result object for accessing different stream types and additional information.
2422
+ */
2423
+ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
2424
+ /**
2425
+ Warnings from the model provider (e.g. unsupported settings) for the first step.
2426
+ */
2427
+ readonly warnings: Promise<CallWarning[] | undefined>;
2428
+ /**
2429
+ The total token usage of the generated response.
2430
+ When there are multiple steps, the usage is the sum of all step usages.
2431
+
2432
+ Resolved when the response is finished.
2433
+ */
2434
+ readonly usage: Promise<LanguageModelUsage>;
2435
+ /**
2436
+ Sources that have been used as input to generate the response.
2437
+ For multi-step generation, the sources are accumulated from all steps.
2438
+
2439
+ Resolved when the response is finished.
2440
+ */
2441
+ readonly sources: Promise<Source[]>;
2442
+ /**
2443
+ Files that have been generated by the model in the last step.
2444
+
2445
+ Resolved when the response is finished.
2446
+ */
2447
+ readonly files: Promise<GeneratedFile[]>;
2448
+ /**
2449
+ The reason why the generation finished. Taken from the last step.
2450
+
2451
+ Resolved when the response is finished.
2452
+ */
2453
+ readonly finishReason: Promise<FinishReason>;
2454
+ /**
2455
+ Additional provider-specific metadata from the last step.
2456
+ Metadata is passed through from the provider to the AI SDK and
2457
+ enables provider-specific results that can be fully encapsulated in the provider.
2458
+ */
2459
+ readonly providerMetadata: Promise<ProviderMetadata | undefined>;
2460
+ /**
2461
+ @deprecated Use `providerMetadata` instead.
2462
+ */
2463
+ readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
2464
+ /**
2465
+ The full text that has been generated by the last step.
2466
+
2467
+ Resolved when the response is finished.
2468
+ */
2469
+ readonly text: Promise<string>;
2470
+ /**
2471
+ The reasoning that has been generated by the last step.
2472
+
2473
+ Resolved when the response is finished.
2474
+ */
2475
+ readonly reasoning: Promise<string | undefined>;
2476
+ /**
2477
+ The full reasoning that the model has generated.
2478
+
2479
+ Resolved when the response is finished.
2480
+ */
2481
+ readonly reasoningDetails: Promise<Array<ReasoningDetail>>;
2482
+ /**
2483
+ The tool calls that have been executed in the last step.
2484
+
2485
+ Resolved when the response is finished.
2486
+ */
2487
+ readonly toolCalls: Promise<ToolCallUnion<TOOLS>[]>;
2488
+ /**
2489
+ The tool results that have been generated in the last step.
2490
+
2491
+ Resolved when all tool executions are finished.
2492
+ */
2493
+ readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
2494
+ /**
2495
+ Details for all steps.
2496
+ You can use this to get information about intermediate steps,
2497
+ such as the tool calls or the response headers.
2498
+ */
2499
+ readonly steps: Promise<Array<StepResult<TOOLS>>>;
2500
+ /**
2501
+ Additional request information from the last step.
2502
+ */
2503
+ readonly request: Promise<LanguageModelRequestMetadata>;
2504
+ /**
2505
+ Additional response information from the last step.
2506
+ */
2507
+ readonly response: Promise<LanguageModelResponseMetadata & {
2508
+ /**
2509
+ The response messages that were generated during the call. It consists of an assistant message,
2510
+ potentially containing tool calls.
2511
+
2512
+ When there are tool results, there is an additional tool message with the tool results that are available.
2513
+ If there are tools that do not have execute functions, they are not included in the tool results and
2514
+ need to be added separately.
2515
+ */
2516
+ messages: Array<ResponseMessage>;
2517
+ }>;
2518
+ /**
2519
+ A text stream that returns only the generated text deltas. You can use it
2520
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
2521
+ stream will throw the error.
2522
+ */
2523
+ readonly textStream: AsyncIterableStream<string>;
2524
+ /**
2525
+ A stream with all events, including text deltas, tool calls, tool results, and
2526
+ errors.
2527
+ You can use it as either an AsyncIterable or a ReadableStream.
2528
+ Only errors that stop the stream, such as network errors, are thrown.
2529
+ */
2530
+ readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
2531
+ /**
2532
+ A stream of partial outputs. It uses the `experimental_output` specification.
2533
+ */
2534
+ readonly experimental_partialOutputStream: AsyncIterableStream<PARTIAL_OUTPUT>;
2535
+ /**
2536
+ Consumes the stream without processing the parts.
2537
+ This is useful to force the stream to finish.
2538
+ It effectively removes the backpressure and allows the stream to finish,
2539
+ triggering the `onFinish` callback and the promise resolution.
2540
+ */
2541
+ consumeStream(): Promise<void>;
2542
+ /**
2543
+ Converts the result to a data stream.
2544
+
2545
+ @param data an optional StreamData object that will be merged into the stream.
2546
+ @param getErrorMessage an optional function that converts an error to an error message.
2547
+ @param sendUsage whether to send the usage information to the client. Defaults to true.
2548
+ @param sendReasoning whether to send the reasoning information to the client. Defaults to false.
2549
+ @return A data stream.
2550
+ */
2551
+ toDataStream(options?: {
2552
+ data?: StreamData;
2553
+ getErrorMessage?: (error: unknown) => string;
2554
+ } & DataStreamOptions): ReadableStream<Uint8Array>;
2555
+ /**
2556
+ * Merges the result as a data stream into another data stream.
2557
+ *
2558
+ * @param dataStream A data stream writer.
2559
+ * @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
2560
+ * @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
2561
+ */
2562
+ mergeIntoDataStream(dataStream: DataStreamWriter, options?: DataStreamOptions): void;
2563
+ /**
2564
+ Writes data stream output to a Node.js response-like object.
2565
+
2566
+ @param response A Node.js response-like object (ServerResponse).
2567
+ @param options.status The status code.
2568
+ @param options.statusText The status text.
2569
+ @param options.headers The headers.
2570
+ @param options.data The stream data.
2571
+ @param options.getErrorMessage An optional function that converts an error to an error message.
2572
+ @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
2573
+ @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
2574
+ */
2575
+ pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & {
2576
+ data?: StreamData;
2577
+ getErrorMessage?: (error: unknown) => string;
2578
+ } & DataStreamOptions): void;
2579
+ /**
2580
+ Writes text delta output to a Node.js response-like object.
2581
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2582
+ writes each text delta as a separate chunk.
2583
+
2584
+ @param response A Node.js response-like object (ServerResponse).
2585
+ @param init Optional headers, status code, and status text.
2586
+ */
2587
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
2588
+ /**
2589
+ Converts the result to a streamed response object with a stream data part stream.
2590
+ It can be used with the `useChat` and `useCompletion` hooks.
2591
+
2592
+ @param options.status The status code.
2593
+ @param options.statusText The status text.
2594
+ @param options.headers The headers.
2595
+ @param options.data The stream data.
2596
+ @param options.getErrorMessage An optional function that converts an error to an error message.
2597
+ @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
2598
+ @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
2599
+
2600
+ @return A response object.
2601
+ */
2602
+ toDataStreamResponse(options?: ResponseInit & {
2603
+ data?: StreamData;
2604
+ getErrorMessage?: (error: unknown) => string;
2605
+ } & DataStreamOptions): Response;
2606
+ /**
2607
+ Creates a simple text stream response.
2608
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
2609
+ Non-text-delta events are ignored.
2610
+
2611
+ @param init Optional headers, status code, and status text.
2612
+ */
2613
+ toTextStreamResponse(init?: ResponseInit): Response;
2461
2614
  }
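A sketch of consuming the result interface above, either as an async iterable or as a web `Response` (model is assumed):

```ts
import { streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed provider model instance

const result = streamText({ model, prompt: 'Write a haiku about streams.' });

// Option 1: iterate text deltas directly.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

// Option 2 (alternative): hand the stream to a web framework.
// return result.toDataStreamResponse({ sendReasoning: true });
```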
2462
- declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
2463
- /**
2464
- * A lightweight MCP Client implementation
2465
- *
2466
- * The primary purpose of this client is tool conversion between MCP<>AI SDK
2467
- * but can later be extended to support other MCP features
2468
- *
2469
- * Tool parameters are automatically inferred from the server's JSON schema
2470
- * if not explicitly provided in the tools configuration
2471
- *
2472
- * Not supported:
2473
- * - Client options (e.g. sampling, roots) as they are not needed for tool conversion
2474
- * - Accepting notifications
2475
- */
2476
- declare class MCPClient {
2477
- private transport;
2478
- private onUncaughtError?;
2479
- private clientInfo;
2480
- private requestMessageId;
2481
- private responseHandlers;
2482
- private serverCapabilities;
2483
- private isClosed;
2484
- constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
2485
- init(): Promise<this>;
2486
- close(): Promise<void>;
2487
- private request;
2488
- private listTools;
2489
- private callTool;
2490
- private notification;
2615
+ type TextStreamPart<TOOLS extends ToolSet> = {
2616
+ type: 'text-delta';
2617
+ textDelta: string;
2618
+ } | {
2619
+ type: 'reasoning';
2620
+ textDelta: string;
2621
+ } | {
2622
+ type: 'reasoning-signature';
2623
+ signature: string;
2624
+ } | {
2625
+ type: 'redacted-reasoning';
2626
+ data: string;
2627
+ } | {
2628
+ type: 'source';
2629
+ source: Source;
2630
+ } | ({
2631
+ type: 'file';
2632
+ } & GeneratedFile) | ({
2633
+ type: 'tool-call';
2634
+ } & ToolCallUnion<TOOLS>) | {
2635
+ type: 'tool-call-streaming-start';
2636
+ toolCallId: string;
2637
+ toolName: string;
2638
+ } | {
2639
+ type: 'tool-call-delta';
2640
+ toolCallId: string;
2641
+ toolName: string;
2642
+ argsTextDelta: string;
2643
+ } | ({
2644
+ type: 'tool-result';
2645
+ } & ToolResultUnion<TOOLS>) | {
2646
+ type: 'step-start';
2647
+ messageId: string;
2648
+ request: LanguageModelRequestMetadata;
2649
+ warnings: CallWarning[];
2650
+ } | {
2651
+ type: 'step-finish';
2652
+ messageId: string;
2653
+ logprobs?: LogProbs;
2654
+ request: LanguageModelRequestMetadata;
2655
+ warnings: CallWarning[] | undefined;
2656
+ response: LanguageModelResponseMetadata;
2657
+ usage: LanguageModelUsage;
2658
+ finishReason: FinishReason;
2659
+ providerMetadata: ProviderMetadata | undefined;
2660
+ /**
2661
+ * @deprecated Use `providerMetadata` instead.
2662
+ */
2663
+ experimental_providerMetadata?: ProviderMetadata;
2664
+ isContinued: boolean;
2665
+ } | {
2666
+ type: 'finish';
2667
+ finishReason: FinishReason;
2668
+ usage: LanguageModelUsage;
2669
+ providerMetadata: ProviderMetadata | undefined;
2670
+ /**
2671
+ * @deprecated Use `providerMetadata` instead.
2672
+ */
2673
+ experimental_providerMetadata?: ProviderMetadata;
2674
+ /**
2675
+ * @deprecated will be moved into provider metadata
2676
+ */
2677
+ logprobs?: LogProbs;
2678
+ /**
2679
+ * @deprecated use response on step-finish instead
2680
+ */
2681
+ response: LanguageModelResponseMetadata;
2682
+ } | {
2683
+ type: 'error';
2684
+ error: unknown;
2685
+ };
2686
+
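A sketch of switching over the discriminated union above via `fullStream`:

```ts
import { streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed provider model instance

const result = streamText({ model, prompt: 'Think, then answer: 2 + 2?' });

for await (const part of result.fullStream) {
  switch (part.type) {
    case 'text-delta':
      process.stdout.write(part.textDelta);
      break;
    case 'reasoning':
      // reasoning parts also carry a `textDelta`
      break;
    case 'tool-call':
      console.log('tool call:', part.toolName, part.args);
      break;
    case 'error':
      console.error('stream error:', part.error);
      break;
  }
}
```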
2687
+ /**
2688
+ * Smooths text streaming output.
2689
+ *
2690
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
2691
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
2692
+ *
2693
+ * @returns A transform stream that smooths text streaming output.
2694
+ */
2695
+ declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
2696
+ delayInMs?: number | null;
2697
+ chunking?: 'word' | 'line' | RegExp;
2491
2698
  /**
2492
- * Returns a set of AI SDK tools from the MCP server
2493
- * @returns A record of tool names to their implementations
2699
+ * Internal. For test use only. May change without notice.
2494
2700
  */
2495
- tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>({ schemas, }?: {
2496
- schemas?: TOOL_SCHEMAS;
2497
- }): Promise<McpToolSet<TOOL_SCHEMAS>>;
2498
- private onClose;
2499
- private onError;
2500
- private onResponse;
2501
- }
2701
+ _internal?: {
2702
+ delay?: (delayInMs: number | null) => Promise<void>;
2703
+ };
2704
+ }): (options: {
2705
+ tools: TOOLS;
2706
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
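A sketch of wiring the transform above into `streamText` (model is assumed):

```ts
import { smoothStream, streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed provider model instance

const result = streamText({
  model,
  prompt: 'Tell me a story.',
  // Emit word-sized chunks with a 20 ms delay between them.
  experimental_transform: smoothStream({ delayInMs: 20, chunking: 'word' }),
});

for await (const delta of result.textStream) process.stdout.write(delta);
```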
2502
2707
 
2503
- type ToolSet = Record<string, Tool>;
2708
+ /**
2709
+ A transformation that is applied to the stream.
2504
2710
 
2505
- type ToolCallUnion<TOOLS extends ToolSet> = ValueOf<{
2506
- [NAME in keyof TOOLS]: {
2507
- type: 'tool-call';
2508
- toolCallId: string;
2509
- toolName: NAME & string;
2510
- args: inferParameters<TOOLS[NAME]['parameters']>;
2511
- };
2512
- }>;
2711
+ @param stopStream - A function that stops the source stream.
2712
+ @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
2713
+ */
2714
+ type StreamTextTransform<TOOLS extends ToolSet> = (options: {
2715
+ tools: TOOLS;
2716
+ stopStream: () => void;
2717
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
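Transforms are plain `TransformStream` factories; a minimal sketch of a custom transform that upper-cases text deltas (assuming these types are public exports; `stopStream` could be called to end the source stream early):

```ts
import type { StreamTextTransform, TextStreamPart, ToolSet } from 'ai';

const upperCaseTransform: StreamTextTransform<ToolSet> = () =>
  new TransformStream<TextStreamPart<ToolSet>, TextStreamPart<ToolSet>>({
    transform(part, controller) {
      controller.enqueue(
        part.type === 'text-delta'
          ? { ...part, textDelta: part.textDelta.toUpperCase() }
          : part,
      );
    },
  });
// Pass via `experimental_transform`; multiple transforms run in the order given.
```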
2513
2718
  /**
2514
- * @deprecated Use `ToolCallUnion` instead.
2719
+ Callback that is set using the `onError` option.
2720
+
2721
+ @param event - The event that is passed to the callback.
2515
2722
  */
2516
- type CoreToolCallUnion<TOOLS extends ToolSet> = ToolCallUnion<ToolSet>;
2517
- type ToolCallArray<TOOLS extends ToolSet> = Array<ToolCallUnion<TOOLS>>;
2723
+ type StreamTextOnErrorCallback = (event: {
2724
+ error: unknown;
2725
+ }) => Promise<void> | void;
2726
+ /**
2727
+ Callback that is set using the `onStepFinish` option.
2518
2728
 
2519
- type ToToolsWithExecute<TOOLS extends ToolSet> = {
2520
- [K in keyof TOOLS as TOOLS[K] extends {
2521
- execute: any;
2522
- } ? K : never]: TOOLS[K];
2523
- };
2524
- type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
2525
- [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
2526
- };
2527
- type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
2528
- [NAME in keyof TOOLS]: {
2529
- type: 'tool-result';
2530
- toolCallId: string;
2531
- toolName: NAME & string;
2532
- args: inferParameters<TOOLS[NAME]['parameters']>;
2533
- result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
2534
- };
2535
- }>;
2536
- type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
2729
+ @param stepResult - The result of the step.
2730
+ */
2731
+ type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
2537
2732
  /**
2538
- * @deprecated Use `ToolResultUnion` instead.
2733
+ Callback that is set using the `onChunk` option.
2734
+
2735
+ @param event - The event that is passed to the callback.
2539
2736
  */
2540
- type CoreToolResultUnion<TOOLS extends ToolSet> = ToolResultUnion<TOOLS>;
2541
- type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;
2737
+ type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2738
+ chunk: Extract<TextStreamPart<TOOLS>, {
2739
+ type: 'text-delta' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
2740
+ }>;
2741
+ }) => Promise<void> | void;
2742
+ /**
2743
+ Callback that is set using the `onFinish` option.
2542
2744
 
2745
+ @param event - The event that is passed to the callback.
2746
+ */
2747
+ type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
2748
+ /**
2749
+ Details for all steps.
2750
+ */
2751
+ readonly steps: StepResult<TOOLS>[];
2752
+ }) => Promise<void> | void;
2543
2753
  /**
2544
- A message that was generated during the generation process.
2545
- It can be either an assistant message or a tool message.
2754
+ Generate a text and call tools for a given prompt using a language model.
2755
+
2756
+ This function streams the output. If you do not want to stream the output, use `generateText` instead.
2757
+
2758
+ @param model - The language model to use.
2759
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2760
+
2761
+ @param system - A system message that will be part of the prompt.
2762
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2763
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2764
+
2765
+ @param maxTokens - Maximum number of tokens to generate.
2766
+ @param temperature - Temperature setting.
2767
+ The value is passed through to the provider. The range depends on the provider and model.
2768
+ It is recommended to set either `temperature` or `topP`, but not both.
2769
+ @param topP - Nucleus sampling.
2770
+ The value is passed through to the provider. The range depends on the provider and model.
2771
+ It is recommended to set either `temperature` or `topP`, but not both.
2772
+ @param topK - Only sample from the top K options for each subsequent token.
2773
+ Used to remove "long tail" low probability responses.
2774
+ Recommended for advanced use cases only. You usually only need to use temperature.
2775
+ @param presencePenalty - Presence penalty setting.
2776
+ It affects how likely the model is to repeat information that is already in the prompt.
2777
+ The value is passed through to the provider. The range depends on the provider and model.
2778
+ @param frequencyPenalty - Frequency penalty setting.
2779
+ It affects how likely the model is to repeatedly use the same words or phrases.
2780
+ The value is passed through to the provider. The range depends on the provider and model.
2781
+ @param stopSequences - Stop sequences.
2782
+ If set, the model will stop generating text when one of the stop sequences is generated.
2783
+ @param seed - The seed (integer) to use for random sampling.
2784
+ If set and supported by the model, calls will generate deterministic results.
2785
+
2786
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2787
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2788
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2789
+
2790
+ @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
2791
+ @param experimental_generateMessageId - Generate a unique ID for each message.
2792
+
2793
+ @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2794
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
2795
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2796
+ @param onFinish - Callback that is called when the LLM response and all request tool executions
2797
+ (for tools that have an `execute` function) are finished.
2798
+
2799
+ @return
2800
+ A result object for accessing different stream types and additional information.
2546
2801
  */
2547
- type ResponseMessage = (CoreAssistantMessage | CoreToolMessage) & {
2802
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
2548
2803
  /**
2549
- Message ID generated by the AI SDK.
2804
+ The language model to use.
2550
2805
  */
2551
- id: string;
2552
- };
2553
- /**
2554
- * The result of a single step in the generation process.
2806
+ model: LanguageModel;
2807
+ /**
2808
+ The tools that the model can call. The model needs to support calling tools.
2809
+ */
2810
+ tools?: TOOLS;
2811
+ /**
2812
+ The tool choice strategy. Default: 'auto'.
2813
+ */
2814
+ toolChoice?: ToolChoice<TOOLS>;
2815
+ /**
2816
+ Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
2817
+
2818
+ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
2819
+
2820
+ By default, it's set to 1, which means that only a single LLM call is made.
2555
2821
  */
2556
- type StepResult<TOOLS extends ToolSet> = {
2822
+ maxSteps?: number;
2557
2823
  /**
2558
- The generated text.
2559
- */
2560
- readonly text: string;
2824
+ Generate a unique ID for each message.
2825
+ */
2826
+ experimental_generateMessageId?: IDGenerator;
2561
2827
  /**
2562
- The reasoning that was generated during the generation.
2563
- */
2564
- readonly reasoning: string | undefined;
2565
- readonly reasoningDetails: Array<ReasoningDetail>;
2828
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
2829
+
2830
+ By default, it's set to false.
2831
+ */
2832
+ experimental_continueSteps?: boolean;
2566
2833
  /**
2567
- The sources that were used to generate the text.
2568
- */
2569
- readonly sources: Source[];
2834
+ Optional telemetry configuration (experimental).
2835
+ */
2836
+ experimental_telemetry?: TelemetrySettings;
2570
2837
  /**
2571
- The tool calls that were made during the generation.
2572
- */
2573
- readonly toolCalls: ToolCallArray<TOOLS>;
2838
+ Additional provider-specific options. They are passed through
2839
+ to the provider from the AI SDK and enable provider-specific
2840
+ functionality that can be fully encapsulated in the provider.
2841
+ */
2842
+ providerOptions?: ProviderOptions;
2574
2843
  /**
2575
- The results of the tool calls.
2576
- */
2577
- readonly toolResults: ToolResultArray<TOOLS>;
2844
+ @deprecated Use `providerOptions` instead.
2845
+ */
2846
+ experimental_providerMetadata?: ProviderMetadata;
2578
2847
  /**
2579
- The reason why the generation finished.
2580
- */
2581
- readonly finishReason: FinishReason;
2848
+ Limits the tools that are available for the model to call without
2849
+ changing the tool call and result types in the result.
2850
+ */
2851
+ experimental_activeTools?: Array<keyof TOOLS>;
2582
2852
  /**
2583
- The token usage of the generated text.
2584
- */
2585
- readonly usage: LanguageModelUsage;
2853
+ Optional specification for parsing structured outputs from the LLM response.
2854
+ */
2855
+ experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
2586
2856
  /**
2587
- Warnings from the model provider (e.g. unsupported settings).
2588
- */
2589
- readonly warnings: CallWarning[] | undefined;
2857
+ A function that attempts to repair a tool call that failed to parse.
2858
+ */
2859
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2590
2860
  /**
2591
- Logprobs for the completion.
2592
- `undefined` if the mode does not support logprobs or if was not enabled.
2593
- */
2594
- readonly logprobs: LogProbs | undefined;
2861
+ Enable streaming of tool call deltas as they are generated. Disabled by default.
2862
+ */
2863
+ toolCallStreaming?: boolean;
2864
+ /**
2865
+ @deprecated Use `toolCallStreaming` instead.
2866
+ */
2867
+ experimental_toolCallStreaming?: boolean;
2868
+ /**
2869
+ Optional stream transformations.
2870
+ They are applied in the order they are provided.
2871
+ The stream transformations must maintain the stream structure for streamText to work correctly.
2872
+ */
2873
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2874
+ /**
2875
+ Callback that is called for each chunk of the stream.
2876
+ The stream processing will pause until the callback promise is resolved.
2877
+ */
2878
+ onChunk?: StreamTextOnChunkCallback<TOOLS>;
2879
+ /**
2880
+ Callback that is invoked when an error occurs during streaming.
2881
+ You can use it to log errors.
2882
+ The stream processing will pause until the callback promise is resolved.
2883
+ */
2884
+ onError?: StreamTextOnErrorCallback;
2595
2885
  /**
2596
- Additional request information.
2886
+ Callback that is called when the LLM response and all request tool executions
2887
+ (for tools that have an `execute` function) are finished.
2888
+
2889
+ The usage is the combined usage of all steps.
2597
2890
  */
2598
- readonly request: LanguageModelRequestMetadata;
2891
+ onFinish?: StreamTextOnFinishCallback<TOOLS>;
2599
2892
  /**
2600
- Additional response information.
2601
- */
2602
- readonly response: LanguageModelResponseMetadata & {
2603
- /**
2604
- The response messages that were generated during the call.
2605
- Response messages can be either assistant messages or tool messages.
2606
- They contain a generated id.
2893
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
2607
2894
  */
2608
- readonly messages: Array<ResponseMessage>;
2609
- /**
2610
- Response body (available only for providers that use HTTP requests).
2611
- */
2612
- body?: unknown;
2613
- };
2895
+ onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
2614
2896
  /**
2615
- Additional provider-specific metadata. They are passed through
2616
- from the provider to the AI SDK and enable provider-specific
2617
- results that can be fully encapsulated in the provider.
2897
+ Internal. For test use only. May change without notice.
2618
2898
  */
2619
- readonly providerMetadata: ProviderMetadata | undefined;
2899
+ _internal?: {
2900
+ now?: () => number;
2901
+ generateId?: IDGenerator;
2902
+ currentDate?: () => Date;
2903
+ };
2904
+ }): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
2905
+
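A sketch of the streaming callbacks described above (model is assumed):

```ts
import { streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed provider model instance

const result = streamText({
  model,
  prompt: 'Explain backpressure in one paragraph.',
  onChunk({ chunk }) {
    if (chunk.type === 'text-delta') process.stdout.write(chunk.textDelta);
  },
  onError({ error }) {
    console.error('stream error:', error); // streaming errors surface here, not as throws
  },
  onFinish({ text, finishReason, usage }) {
    console.log('\nfinished:', finishReason, usage.totalTokens);
  },
});

// Without another consumer, force the stream to run to completion:
await result.consumeStream();
```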
2906
+ /**
2907
+ The result of a `generateImage` call.
2908
+ It contains the images and additional information.
2909
+ */
2910
+ interface GenerateImageResult {
2620
2911
  /**
2621
- @deprecated Use `providerMetadata` instead.
2912
+ The first image that was generated.
2622
2913
  */
2623
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
2914
+ readonly image: GeneratedFile;
2624
2915
  /**
2625
- The type of step that this result is for. The first step is always
2626
- an "initial" step, and subsequent steps are either "continue" steps
2627
- or "tool-result" steps.
2628
- */
2629
- readonly stepType: 'initial' | 'continue' | 'tool-result';
2916
+ The images that were generated.
2917
+ */
2918
+ readonly images: Array<GeneratedFile>;
2630
2919
  /**
2631
- True when there will be a continuation step with a continuation text.
2920
+ Warnings for the call, e.g. unsupported settings.
2921
+ */
2922
+ readonly warnings: Array<ImageGenerationWarning>;
2923
+ /**
2924
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
2632
2925
  */
2633
- readonly isContinued: boolean;
2634
- };
2926
+ readonly responses: Array<ImageModelResponseMetadata>;
2927
+ }
2635
2928
 
2636
2929
  /**
2637
- The result of a `generateText` call.
2638
- It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
2930
+ Generates images using an image model.
2931
+
2932
+ @param model - The image model to use.
2933
+ @param prompt - The prompt that should be used to generate the image.
2934
+ @param n - Number of images to generate. Default: 1.
2935
+ @param size - Size of the images to generate. Must have the format `{width}x{height}`.
2936
+ @param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
2937
+ @param seed - Seed for the image generation.
2938
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
2939
+ as body parameters.
2940
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2941
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2942
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2943
+
2944
+ @returns A result object that contains the generated images.
2639
2945
  */
2640
- interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2946
+ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
2641
2947
  /**
2642
- The generated text.
2948
+ The image model to use.
2643
2949
  */
2644
- readonly text: string;
2950
+ model: ImageModelV1;
2645
2951
  /**
2646
- The reasoning text that the model has generated. Can be undefined if the model
2647
- has only generated text.
2952
+ The prompt that should be used to generate the image.
2648
2953
  */
2649
- readonly reasoning: string | undefined;
2954
+ prompt: string;
2650
2955
  /**
2651
- The full reasoning that the model has generated.
2956
+ Number of images to generate.
2652
2957
  */
2653
- readonly reasoningDetails: Array<ReasoningDetail>;
2958
+ n?: number;
2654
2959
  /**
2655
- Sources that have been used as input to generate the response.
2656
- For multi-step generation, the sources are accumulated from all steps.
2960
+ Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
2657
2961
  */
2658
- readonly sources: Source[];
2962
+ size?: `${number}x${number}`;
2659
2963
  /**
2660
- The generated structured output. It uses the `experimental_output` specification.
2964
+ Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
2661
2965
  */
2662
- readonly experimental_output: OUTPUT;
2966
+ aspectRatio?: `${number}:${number}`;
2663
2967
  /**
2664
- The tool calls that were made during the generation.
2968
+ Seed for the image generation. If not provided, the default seed will be used.
2665
2969
  */
2666
- readonly toolCalls: ToolCallArray<TOOLS>;
2970
+ seed?: number;
2667
2971
  /**
2668
- The results of the tool calls.
2972
+ Additional provider-specific options that are passed through to the provider
2973
+ as body parameters.
2974
+
2975
+ The outer record is keyed by the provider name, and the inner
2976
+ record is keyed by the provider-specific metadata key.
2977
+ ```ts
2978
+ {
2979
+ "openai": {
2980
+ "style": "vivid"
2981
+ }
2982
+ }
2983
+ ```
2984
+ */
2985
+ providerOptions?: Record<string, Record<string, JSONValue>>;
2986
+ /**
2987
+ Maximum number of retries per image model call. Set to 0 to disable retries.
2988
+
2989
+ @default 2
2669
2990
  */
2670
- readonly toolResults: ToolResultArray<TOOLS>;
2991
+ maxRetries?: number;
2992
+ /**
2993
+ Abort signal.
2994
+ */
2995
+ abortSignal?: AbortSignal;
2996
+ /**
2997
+ Additional headers to include in the request.
2998
+ Only applicable for HTTP-based providers.
2999
+ */
3000
+ headers?: Record<string, string>;
3001
+ }): Promise<GenerateImageResult>;
3002
+
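A hedged sketch of the call above; the `experimental_generateImage` export name and the provider's image-model accessor are assumptions:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { image, warnings } = await generateImage({
  model: openai.image('dall-e-3'), // assumed image-model accessor
  prompt: 'A watercolor lighthouse at dusk',
  size: '1024x1024',
  providerOptions: { openai: { style: 'vivid' } },
});

console.log(image.base64.slice(0, 32), warnings);
```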
3003
+ /**
3004
+ The result of a `generateObject` call.
3005
+ */
3006
+ interface GenerateObjectResult<OBJECT> {
3007
+ /**
3008
+ The generated object (typed according to the schema).
3009
+ */
3010
+ readonly object: OBJECT;
2671
3011
  /**
2672
3012
  The reason why the generation finished.
2673
- */
3013
+ */
2674
3014
  readonly finishReason: FinishReason;
2675
3015
  /**
2676
3016
  The token usage of the generated text.
2677
- */
3017
+ */
2678
3018
  readonly usage: LanguageModelUsage;
2679
3019
  /**
2680
- Warnings from the model provider (e.g. unsupported settings)
2681
- */
3020
+ Warnings from the model provider (e.g. unsupported settings).
3021
+ */
2682
3022
  readonly warnings: CallWarning[] | undefined;
2683
3023
  /**
2684
- Details for all steps.
2685
- You can use this to get information about intermediate steps,
2686
- such as the tool calls or the response headers.
2687
- */
2688
- readonly steps: Array<StepResult<TOOLS>>;
2689
- /**
2690
3024
  Additional request information.
2691
3025
  */
2692
3026
  readonly request: LanguageModelRequestMetadata;
2693
3027
  /**
2694
3028
  Additional response information.
2695
3029
  */
2696
- readonly response: LanguageModelResponseMetadata & {
2697
- /**
2698
- The response messages that were generated during the call. It consists of an assistant message,
2699
- potentially containing tool calls.
2700
-
2701
- When there are tool results, there is an additional tool message with the tool results that are available.
2702
- If there are tools that do not have execute functions, they are not included in the tool results and
2703
- need to be added separately.
2704
- */
2705
- messages: Array<ResponseMessage>;
2706
- /**
2707
- Response body (available only for providers that use HTTP requests).
2708
- */
2709
- body?: unknown;
2710
- };
2711
- /**
2712
- Logprobs for the completion.
2713
- `undefined` if the mode does not support logprobs or if it was not enabled.
2714
-
2715
- @deprecated Will become a provider extension in the future.
2716
- */
2717
- readonly logprobs: LogProbs | undefined;
2718
- /**
2719
- Additional provider-specific metadata. They are passed through
2720
- from the provider to the AI SDK and enable provider-specific
2721
- results that can be fully encapsulated in the provider.
2722
- */
2723
- readonly providerMetadata: ProviderMetadata | undefined;
2724
- /**
2725
- @deprecated Use `providerMetadata` instead.
2726
- */
2727
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
2728
- }
2729
-
2730
- interface Output<OUTPUT, PARTIAL> {
2731
- readonly type: 'object' | 'text';
2732
- injectIntoSystemPrompt(options: {
2733
- system: string | undefined;
2734
- model: LanguageModel;
2735
- }): string | undefined;
2736
- responseFormat: (options: {
2737
- model: LanguageModel;
2738
- }) => LanguageModelV1CallOptions['responseFormat'];
2739
- parsePartial(options: {
2740
- text: string;
2741
- }): {
2742
- partial: PARTIAL;
2743
- } | undefined;
2744
- parseOutput(options: {
2745
- text: string;
2746
- }, context: {
2747
- response: LanguageModelResponseMetadata;
2748
- usage: LanguageModelUsage;
2749
- }): OUTPUT;
2750
- }
2751
- declare const text: () => Output<string, string>;
2752
- declare const object: <OUTPUT>({ schema: inputSchema, }: {
2753
- schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
2754
- }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
2755
-
2756
- type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
2757
- declare const output_object: typeof object;
2758
- declare const output_text: typeof text;
2759
- declare namespace output {
2760
- export {
2761
- output_Output as Output,
2762
- output_object as object,
2763
- output_text as text,
2764
- };
2765
- }
2766
-
2767
- declare const symbol$f: unique symbol;
2768
- declare class InvalidToolArgumentsError extends AISDKError {
2769
- private readonly [symbol$f];
2770
- readonly toolName: string;
2771
- readonly toolArgs: string;
2772
- constructor({ toolArgs, toolName, cause, message, }: {
2773
- message?: string;
2774
- toolArgs: string;
2775
- toolName: string;
2776
- cause: unknown;
2777
- });
2778
- static isInstance(error: unknown): error is InvalidToolArgumentsError;
2779
- }
2780
-
2781
- declare const symbol$e: unique symbol;
2782
- declare class NoSuchToolError extends AISDKError {
2783
- private readonly [symbol$e];
2784
- readonly toolName: string;
2785
- readonly availableTools: string[] | undefined;
2786
- constructor({ toolName, availableTools, message, }: {
2787
- toolName: string;
2788
- availableTools?: string[] | undefined;
2789
- message?: string;
2790
- });
2791
- static isInstance(error: unknown): error is NoSuchToolError;
3030
+ readonly response: LanguageModelResponseMetadata & {
3031
+ /**
3032
+ Response body (available only for providers that use HTTP requests).
3033
+ */
3034
+ body?: unknown;
3035
+ };
3036
+ /**
3037
+ Logprobs for the completion.
3038
+ `undefined` if the model does not support logprobs or if it was not enabled.
3039
+
3040
+ @deprecated Will become a provider extension in the future.
3041
+ */
3042
+ readonly logprobs: LogProbs | undefined;
3043
+ /**
3044
+ Additional provider-specific metadata. They are passed through
3045
+ from the provider to the AI SDK and enable provider-specific
3046
+ results that can be fully encapsulated in the provider.
3047
+ */
3048
+ readonly providerMetadata: ProviderMetadata | undefined;
3049
+ /**
3050
+ @deprecated Use `providerMetadata` instead.
3051
+ */
3052
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
3053
+ /**
3054
+ Converts the object to a JSON response.
3055
+ The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
3056
+ */
3057
+ toJsonResponse(init?: ResponseInit): Response;
2792
3058
  }
2793
3059
 
2794
3060
  /**
2795
- * Appends a client message to the messages array.
2796
- * If the last message in the array has the same id as the new message, it will be replaced.
2797
- * Otherwise, the new message will be appended.
2798
- */
2799
- declare function appendClientMessage({ messages, message, }: {
2800
- messages: Message[];
2801
- message: Message;
2802
- }): Message[];
3061
+ A function that attempts to repair the raw output of the model
3062
+ to enable JSON parsing.
2803
3063
 
3064
+ Should return the repaired text or null if the text cannot be repaired.
3065
+ */
3066
+ type RepairTextFunction = (options: {
3067
+ text: string;
3068
+ error: JSONParseError | TypeValidationError;
3069
+ }) => Promise<string | null>;
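A minimal sketch of such a repair function, assuming the failure mode is a model wrapping its JSON in markdown fences:

import type { RepairTextFunction } from 'ai';

// Hypothetical repair: extract the payload from a ```json fence.
// Returning null signals that the text cannot be repaired.
const stripFences: RepairTextFunction = async ({ text }) => {
  const match = text.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
  return match ? match[1] : null;
};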
2804
3070
  /**
2805
- * Appends the ResponseMessage[] from the response to a Message[] (for useChat).
2806
- * The messages are converted to Messages before being appended.
2807
- * Timestamps are generated for the new messages.
2808
- *
2809
- * @returns A new Message[] with the response messages appended.
3071
+ Generate a structured, typed object for a given prompt and schema using a language model.
3072
+
3073
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3074
+
3075
+ @returns
3076
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
2810
3077
  */
2811
- declare function appendResponseMessages({ messages, responseMessages, _internal: { currentDate }, }: {
2812
- messages: Message[];
2813
- responseMessages: ResponseMessage[];
3078
+ declare function generateObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3079
+ output?: 'object' | undefined;
2814
3080
  /**
2815
- Internal. For test use only. May change without notice.
2816
- */
2817
- _internal?: {
2818
- currentDate?: () => Date;
2819
- };
2820
- }): Message[];
3081
+ The language model to use.
3082
+ */
3083
+ model: LanguageModel;
3084
+ /**
3085
+ The schema of the object that the model should generate.
3086
+ */
3087
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
3088
+ /**
3089
+ Optional name of the output that should be generated.
3090
+ Used by some providers for additional LLM guidance, e.g.
3091
+ via tool or schema name.
3092
+ */
3093
+ schemaName?: string;
3094
+ /**
3095
+ Optional description of the output that should be generated.
3096
+ Used by some providers for additional LLM guidance, e.g.
3097
+ via tool or schema description.
3098
+ */
3099
+ schemaDescription?: string;
3100
+ /**
3101
+ The mode to use for object generation.
2821
3102
 
2822
- /**
2823
- Converts an array of messages from useChat into an array of CoreMessages that can be used
2824
- with the AI core functions (e.g. `streamText`).
2825
- */
2826
- declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<Message, 'id'>>, options?: {
2827
- tools?: TOOLS;
2828
- }): CoreMessage[];
3103
+ The schema is converted into a JSON schema and used in one of the following ways:
2829
3104
 
2830
- /**
2831
- * A function that attempts to repair a tool call that failed to parse.
2832
- *
2833
- * It receives the error and the context as arguments and returns the repair
2834
- * tool call JSON as text.
2835
- *
2836
- * @param options.system - The system prompt.
2837
- * @param options.messages - The messages in the current generation step.
2838
- * @param options.toolCall - The tool call that failed to parse.
2839
- * @param options.tools - The tools that are available.
2840
- * @param options.parameterSchema - A function that returns the JSON Schema for a tool.
2841
- * @param options.error - The error that occurred while parsing the tool call.
2842
- */
2843
- type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
2844
- system: string | undefined;
2845
- messages: CoreMessage[];
2846
- toolCall: LanguageModelV1FunctionToolCall;
2847
- tools: TOOLS;
2848
- parameterSchema: (options: {
2849
- toolName: string;
2850
- }) => JSONSchema7;
2851
- error: NoSuchToolError | InvalidToolArgumentsError;
2852
- }) => Promise<LanguageModelV1FunctionToolCall | null>;
3105
+ - 'auto': The provider will choose the best mode for the model.
3106
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3107
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
2853
3108
 
2854
- /**
2855
- Callback that is set using the `onStepFinish` option.
3109
+ Please note that most providers do not support all modes.
2856
3110
 
2857
- @param stepResult - The result of the step.
2858
- */
2859
- type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
3111
+ Default and recommended: 'auto' (best mode for the model).
3112
+ */
3113
+ mode?: 'auto' | 'json' | 'tool';
3114
+ /**
3115
+ A function that attempts to repair the raw output of the model
3116
+ to enable JSON parsing.
3117
+ */
3118
+ experimental_repairText?: RepairTextFunction;
3119
+ /**
3120
+ Optional telemetry configuration (experimental).
3121
+ */
3122
+ experimental_telemetry?: TelemetrySettings;
3123
+ /**
3124
+ Additional provider-specific options. They are passed through
3125
+ to the provider from the AI SDK and enable provider-specific
3126
+ functionality that can be fully encapsulated in the provider.
3127
+ */
3128
+ providerOptions?: ProviderOptions;
3129
+ /**
3130
+ @deprecated Use `providerOptions` instead.
3131
+ */
3132
+ experimental_providerMetadata?: ProviderMetadata;
3133
+ /**
3134
+ * Internal. For test use only. May change without notice.
3135
+ */
3136
+ _internal?: {
3137
+ generateId?: () => string;
3138
+ currentDate?: () => Date;
3139
+ };
3140
+ }): Promise<GenerateObjectResult<OBJECT>>;
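An illustrative call using the default object strategy (provider import, model id, and schema are assumed):

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const { object, finishReason, usage } = await generateObject({
  model: openai('gpt-4o-mini'), // assumed model id
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
  schemaName: 'Recipe', // optional extra guidance for the provider
  prompt: 'Generate a lasagna recipe.',
});
// `object` is typed as { name: string; ingredients: string[] }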
2860
3141
  /**
2861
- Generate a text and call tools for a given prompt using a language model.
2862
-
2863
- This function does not stream the output. If you want to stream the output, use `streamText` instead.
3142
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
2864
3143
 
2865
- @param model - The language model to use.
3144
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
2866
3145
 
2867
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2868
- @param toolChoice - The tool choice strategy. Default: 'auto'.
3146
+ @return
3147
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
3148
+ */
3149
+ declare function generateObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3150
+ output: 'array';
3151
+ /**
3152
+ The language model to use.
3153
+ */
3154
+ model: LanguageModel;
3155
+ /**
3156
+ The element schema of the array that the model should generate.
3157
+ */
3158
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
3159
+ /**
3160
+ Optional name of the array that should be generated.
3161
+ Used by some providers for additional LLM guidance, e.g.
3162
+ via tool or schema name.
3163
+ */
3164
+ schemaName?: string;
3165
+ /**
3166
+ Optional description of the array that should be generated.
3167
+ Used by some providers for additional LLM guidance, e.g.
3168
+ via tool or schema description.
3169
+ */
3170
+ schemaDescription?: string;
3171
+ /**
3172
+ The mode to use for object generation.
2869
3173
 
2870
- @param system - A system message that will be part of the prompt.
2871
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2872
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
3174
+ The schema is converted into a JSON schema and used in one of the following ways:
2873
3175
 
2874
- @param maxTokens - Maximum number of tokens to generate.
2875
- @param temperature - Temperature setting.
2876
- The value is passed through to the provider. The range depends on the provider and model.
2877
- It is recommended to set either `temperature` or `topP`, but not both.
2878
- @param topP - Nucleus sampling.
2879
- The value is passed through to the provider. The range depends on the provider and model.
2880
- It is recommended to set either `temperature` or `topP`, but not both.
2881
- @param topK - Only sample from the top K options for each subsequent token.
2882
- Used to remove "long tail" low probability responses.
2883
- Recommended for advanced use cases only. You usually only need to use temperature.
2884
- @param presencePenalty - Presence penalty setting.
2885
- It affects the likelihood of the model to repeat information that is already in the prompt.
2886
- The value is passed through to the provider. The range depends on the provider and model.
2887
- @param frequencyPenalty - Frequency penalty setting.
2888
- It affects the likelihood of the model to repeatedly use the same words or phrases.
2889
- The value is passed through to the provider. The range depends on the provider and model.
2890
- @param stopSequences - Stop sequences.
2891
- If set, the model will stop generating text when one of the stop sequences is generated.
2892
- @param seed - The seed (integer) to use for random sampling.
2893
- If set and supported by the model, calls will generate deterministic results.
3176
+ - 'auto': The provider will choose the best mode for the model.
3177
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3178
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
2894
3179
 
2895
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2896
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2897
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3180
+ Please note that most providers do not support all modes.
2898
3181
 
2899
- @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
2900
- @param experimental_generateMessageId - Generate a unique ID for each message.
3182
+ Default and recommended: 'auto' (best mode for the model).
3183
+ */
3184
+ mode?: 'auto' | 'json' | 'tool';
3185
+ /**
3186
+ A function that attempts to repair the raw output of the mode
3187
+ to enable JSON parsing.
3188
+ */
3189
+ experimental_repairText?: RepairTextFunction;
3190
+ /**
3191
+ Optional telemetry configuration (experimental).
3192
+ */
3193
+ experimental_telemetry?: TelemetrySettings;
3194
+ /**
3195
+ Additional provider-specific options. They are passed through
3196
+ to the provider from the AI SDK and enable provider-specific
3197
+ functionality that can be fully encapsulated in the provider.
3198
+ */
3199
+ providerOptions?: ProviderOptions;
3200
+ /**
3201
+ @deprecated Use `providerOptions` instead.
3202
+ */
3203
+ experimental_providerMetadata?: ProviderMetadata;
3204
+ /**
3205
+ * Internal. For test use only. May change without notice.
3206
+ */
3207
+ _internal?: {
3208
+ generateId?: () => string;
3209
+ currentDate?: () => Date;
3210
+ };
3211
+ }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
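An illustrative array-strategy call; note the schema describes a single element, while the result is an array (setup assumed):

import { generateObject, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumed to be configured elsewhere

const { object: party } = await generateObject({
  model,
  output: 'array',
  schema: z.object({ name: z.string(), class: z.string() }),
  prompt: 'Generate three RPG party members.',
});
// party: Array<{ name: string; class: string }>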
3212
+ /**
3213
+ Generate a value from an enum (limited list of string values) using a language model.
2901
3214
 
2902
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
3215
+ This function does not stream the output.
2903
3216
 
2904
- @returns
2905
- A result object that contains the generated text, the results of the tool calls, and additional information.
3217
+ @return
3218
+ A result object that contains the generated value, the finish reason, the token usage, and additional information.
2906
3219
  */
2907
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
3220
+ declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3221
+ output: 'enum';
2908
3222
  /**
2909
3223
  The language model to use.
2910
- */
3224
+ */
2911
3225
  model: LanguageModel;
2912
3226
  /**
2913
- The tools that the model can call. The model needs to support calling tools.
2914
- */
2915
- tools?: TOOLS;
2916
- /**
2917
- The tool choice strategy. Default: 'auto'.
2918
- */
2919
- toolChoice?: ToolChoice<TOOLS>;
3227
+ The enum values that the model should use.
3228
+ */
3229
+ enum: Array<ENUM>;
2920
3230
  /**
2921
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
3231
+ The mode to use for object generation.
2922
3232
 
2923
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
3233
+ The schema is converted into a JSON schema and used in one of the following ways:
2924
3234
 
2925
- By default, it's set to 1, which means that only a single LLM call is made.
2926
- */
2927
- maxSteps?: number;
2928
- /**
2929
- Generate a unique ID for each message.
2930
- */
2931
- experimental_generateMessageId?: IDGenerator;
2932
- /**
2933
- When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
3235
+ - 'auto': The provider will choose the best mode for the model.
3236
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3237
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
2934
3238
 
2935
- By default, it's set to false.
2936
- */
2937
- experimental_continueSteps?: boolean;
3239
+ Please note that most providers do not support all modes.
3240
+
3241
+ Default and recommended: 'auto' (best mode for the model).
3242
+ */
3243
+ mode?: 'auto' | 'json' | 'tool';
3244
+ /**
3245
+ A function that attempts to repair the raw output of the model
3246
+ to enable JSON parsing.
3247
+ */
3248
+ experimental_repairText?: RepairTextFunction;
2938
3249
  /**
2939
3250
  Optional telemetry configuration (experimental).
2940
- */
3251
+ */
2941
3252
  experimental_telemetry?: TelemetrySettings;
2942
3253
  /**
2943
3254
  Additional provider-specific options. They are passed through
2944
3255
  to the provider from the AI SDK and enable provider-specific
2945
3256
  functionality that can be fully encapsulated in the provider.
2946
- */
3257
+ */
2947
3258
  providerOptions?: ProviderOptions;
2948
3259
  /**
2949
3260
  @deprecated Use `providerOptions` instead.
2950
- */
3261
+ */
2951
3262
  experimental_providerMetadata?: ProviderMetadata;
2952
3263
  /**
2953
- Limits the tools that are available for the model to call without
2954
- changing the tool call and result types in the result.
2955
- */
2956
- experimental_activeTools?: Array<keyof TOOLS>;
2957
- /**
2958
- Optional specification for parsing structured outputs from the LLM response.
2959
- */
2960
- experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
2961
- /**
2962
- A function that attempts to repair a tool call that failed to parse.
2963
- */
2964
- experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2965
- /**
2966
- Callback that is called when each step (LLM call) is finished, including intermediate steps.
2967
- */
2968
- onStepFinish?: GenerateTextOnStepFinishCallback<TOOLS>;
2969
- /**
2970
3264
  * Internal. For test use only. May change without notice.
2971
3265
  */
2972
3266
  _internal?: {
2973
- generateId?: IDGenerator;
3267
+ generateId?: () => string;
2974
3268
  currentDate?: () => Date;
2975
3269
  };
2976
- }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2977
-
3270
+ }): Promise<GenerateObjectResult<ENUM>>;
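An illustrative enum-strategy call, narrowing the result to a fixed set of string values (setup assumed):

import { generateObject, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed to be configured elsewhere

const { object: genre } = await generateObject({
  model,
  output: 'enum',
  enum: ['action', 'comedy', 'drama', 'horror'],
  prompt: 'Classify: "A detective chases a thief across Seoul."',
});
// genre: 'action' | 'comedy' | 'drama' | 'horror'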
2978
3271
  /**
2979
- * A stream wrapper to send custom JSON-encoded data back to the client.
2980
- *
2981
- * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
2982
- */
2983
- declare class StreamData {
2984
- private encoder;
2985
- private controller;
2986
- stream: ReadableStream<Uint8Array>;
2987
- private isClosed;
2988
- private warningTimeout;
2989
- constructor();
2990
- close(): Promise<void>;
2991
- append(value: JSONValue$1): void;
2992
- appendMessageAnnotation(value: JSONValue$1): void;
2993
- }
3272
+ Generate JSON with any schema for a given prompt using a language model.
2994
3273
 
2995
- type DataStreamOptions = {
2996
- /**
2997
- * Send usage parts to the client.
2998
- * Default to true.
2999
- */
3000
- sendUsage?: boolean;
3001
- /**
3002
- * Send reasoning parts to the client.
3003
- * Default to false.
3004
- */
3005
- sendReasoning?: boolean;
3006
- /**
3007
- * Send source parts to the client.
3008
- * Default to false.
3009
- */
3010
- sendSources?: boolean;
3011
- /**
3012
- * Send the finish event to the client.
3013
- * Set to false if you are using additional streamText calls
3014
- * that send additional data.
3015
- * Default to true.
3016
- */
3017
- experimental_sendFinish?: boolean;
3018
- /**
3019
- * Send the message start event to the client.
3020
- * Set to false if you are using additional streamText calls
3021
- * and the message start event has already been sent.
3022
- * Default to true.
3023
- *
3024
- * Note: this setting is currently not used, but you should
3025
- * already set it to false if you are using additional
3026
- * streamText calls that send additional data to prevent
3027
- * the message start event from being sent multiple times.
3028
- */
3029
- experimental_sendStart?: boolean;
3030
- };
3031
- /**
3032
- A result object for accessing different stream types and additional information.
3274
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3275
+
3276
+ @returns
3277
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
3033
3278
  */
3034
- interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3035
- /**
3036
- Warnings from the model provider (e.g. unsupported settings) for the first step.
3037
- */
3038
- readonly warnings: Promise<CallWarning[] | undefined>;
3039
- /**
3040
- The total token usage of the generated response.
3041
- When there are multiple steps, the usage is the sum of all step usages.
3042
-
3043
- Resolved when the response is finished.
3044
- */
3045
- readonly usage: Promise<LanguageModelUsage>;
3046
- /**
3047
- Sources that have been used as input to generate the response.
3048
- For multi-step generation, the sources are accumulated from all steps.
3049
-
3050
- Resolved when the response is finished.
3051
- */
3052
- readonly sources: Promise<Source[]>;
3053
- /**
3054
- The reason why the generation finished. Taken from the last step.
3055
-
3056
- Resolved when the response is finished.
3057
- */
3058
- readonly finishReason: Promise<FinishReason>;
3059
- /**
3060
- Additional provider-specific metadata from the last step.
3061
- Metadata is passed through from the provider to the AI SDK and
3062
- enables provider-specific results that can be fully encapsulated in the provider.
3063
- */
3064
- readonly providerMetadata: Promise<ProviderMetadata | undefined>;
3279
+ declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3280
+ output: 'no-schema';
3065
3281
  /**
3066
- @deprecated Use `providerMetadata` instead.
3067
- */
3068
- readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
3282
+ The language model to use.
3283
+ */
3284
+ model: LanguageModel;
3069
3285
  /**
3070
- The full text that has been generated by the last step.
3071
-
3072
- Resolved when the response is finished.
3073
- */
3074
- readonly text: Promise<string>;
3286
+ The mode to use for object generation. Must be "json" for no-schema output.
3287
+ */
3288
+ mode?: 'json';
3075
3289
  /**
3076
- The reasoning that has been generated by the last step.
3077
-
3078
- Resolved when the response is finished.
3079
- */
3080
- readonly reasoning: Promise<string | undefined>;
3290
+ A function that attempts to repair the raw output of the model
3291
+ to enable JSON parsing.
3292
+ */
3293
+ experimental_repairText?: RepairTextFunction;
3081
3294
  /**
3082
- The full reasoning that the model has generated.
3083
-
3084
- Resolved when the response is finished.
3295
+ Optional telemetry configuration (experimental).
3085
3296
  */
3086
- readonly reasoningDetails: Promise<Array<ReasoningDetail>>;
3297
+ experimental_telemetry?: TelemetrySettings;
3087
3298
  /**
3088
- The tool calls that have been executed in the last step.
3089
-
3090
- Resolved when the response is finished.
3091
- */
3092
- readonly toolCalls: Promise<ToolCallUnion<TOOLS>[]>;
3299
+ Additional provider-specific options. They are passed through
3300
+ to the provider from the AI SDK and enable provider-specific
3301
+ functionality that can be fully encapsulated in the provider.
3302
+ */
3303
+ providerOptions?: ProviderOptions;
3093
3304
  /**
3094
- The tool results that have been generated in the last step.
3095
-
3096
- Resolved when the all tool executions are finished.
3097
- */
3098
- readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
3305
+ @deprecated Use `providerOptions` instead.
3306
+ */
3307
+ experimental_providerMetadata?: ProviderMetadata;
3099
3308
  /**
3100
- Details for all steps.
3101
- You can use this to get information about intermediate steps,
3102
- such as the tool calls or the response headers.
3309
+ * Internal. For test use only. May change without notice.
3103
3310
  */
3104
- readonly steps: Promise<Array<StepResult<TOOLS>>>;
3105
- /**
3106
- Additional request information from the last step.
3107
- */
3108
- readonly request: Promise<LanguageModelRequestMetadata>;
3109
- /**
3110
- Additional response information from the last step.
3111
- */
3112
- readonly response: Promise<LanguageModelResponseMetadata & {
3113
- /**
3114
- The response messages that were generated during the call. It consists of an assistant message,
3115
- potentially containing tool calls.
3116
-
3117
- When there are tool results, there is an additional tool message with the tool results that are available.
3118
- If there are tools that do not have execute functions, they are not included in the tool results and
3119
- need to be added separately.
3120
- */
3121
- messages: Array<ResponseMessage>;
3122
- }>;
3311
+ _internal?: {
3312
+ generateId?: () => string;
3313
+ currentDate?: () => Date;
3314
+ };
3315
+ }): Promise<GenerateObjectResult<JSONValue>>;
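An illustrative no-schema call; the result is plain `JSONValue`, so any validation is up to the caller (setup assumed):

import { generateObject, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed to be configured elsewhere

const { object } = await generateObject({
  model,
  output: 'no-schema',
  prompt: 'Generate example user data as JSON.',
});
// `object` is JSONValue; validate before relying on its shape.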
3316
+
3317
+ /**
3318
+ The result of a `streamObject` call that contains the partial object stream and additional information.
3319
+ */
3320
+ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
3123
3321
  /**
3124
- A text stream that returns only the generated text deltas. You can use it
3125
- as either an AsyncIterable or a ReadableStream. When an error occurs, the
3126
- stream will throw the error.
3322
+ Warnings from the model provider (e.g. unsupported settings)
3127
3323
  */
3128
- readonly textStream: AsyncIterableStream<string>;
3324
+ readonly warnings: Promise<CallWarning[] | undefined>;
3129
3325
  /**
3130
- A stream with all events, including text deltas, tool calls, tool results, and
3131
- errors.
3132
- You can use it as either an AsyncIterable or a ReadableStream.
3133
- Only errors that stop the stream, such as network errors, are thrown.
3326
+ The token usage of the generated response. Resolved when the response is finished.
3134
3327
  */
3135
- readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
3328
+ readonly usage: Promise<LanguageModelUsage>;
3136
3329
  /**
3137
- A stream of partial outputs. It uses the `experimental_output` specification.
3330
+ Additional provider-specific metadata. They are passed through
3331
+ from the provider to the AI SDK and enable provider-specific
3332
+ results that can be fully encapsulated in the provider.
3138
3333
  */
3139
- readonly experimental_partialOutputStream: AsyncIterableStream<PARTIAL_OUTPUT>;
3334
+ readonly providerMetadata: Promise<ProviderMetadata | undefined>;
3140
3335
  /**
3141
- Consumes the stream without processing the parts.
3142
- This is useful to force the stream to finish.
3143
- It effectively removes the backpressure and allows the stream to finish,
3144
- triggering the `onFinish` callback and the promise resolution.
3145
- */
3146
- consumeStream(): Promise<void>;
3336
+ @deprecated Use `providerMetadata` instead.
3337
+ */
3338
+ readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
3147
3339
  /**
3148
- Converts the result to a data stream.
3340
+ Additional request information from the last step.
3341
+ */
3342
+ readonly request: Promise<LanguageModelRequestMetadata>;
3343
+ /**
3344
+ Additional response information.
3345
+ */
3346
+ readonly response: Promise<LanguageModelResponseMetadata>;
3347
+ /**
3348
+ The generated object (typed according to the schema). Resolved when the response is finished.
3349
+ */
3350
+ readonly object: Promise<RESULT>;
3351
+ /**
3352
+ Stream of partial objects. It gets more complete as the stream progresses.
3149
3353
 
3150
- @param data an optional StreamData object that will be merged into the stream.
3151
- @param getErrorMessage an optional function that converts an error to an error message.
3152
- @param sendUsage whether to send the usage information to the client. Defaults to true.
3153
- @param sendReasoning whether to send the reasoning information to the client. Defaults to false.
3154
- @return A data stream.
3354
+ Note that the partial object is not validated.
3355
+ If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
3155
3356
  */
3156
- toDataStream(options?: {
3157
- data?: StreamData;
3158
- getErrorMessage?: (error: unknown) => string;
3159
- } & DataStreamOptions): ReadableStream<Uint8Array>;
3357
+ readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
3160
3358
  /**
3161
- * Merges the result as a data stream into another data stream.
3162
- *
3163
- * @param dataStream A data stream writer.
3164
- * @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3165
- * @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3359
+ * Stream over complete array elements. Only available if the output strategy is set to `array`.
3166
3360
  */
3167
- mergeIntoDataStream(dataStream: DataStreamWriter, options?: DataStreamOptions): void;
3361
+ readonly elementStream: ELEMENT_STREAM;
3168
3362
  /**
3169
- Writes data stream output to a Node.js response-like object.
3170
-
3171
- @param response A Node.js response-like object (ServerResponse).
3172
- @param options.status The status code.
3173
- @param options.statusText The status text.
3174
- @param options.headers The headers.
3175
- @param options.data The stream data.
3176
- @param options.getErrorMessage An optional function that converts an error to an error message.
3177
- @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3178
- @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3363
+ Text stream of the JSON representation of the generated object. It contains text chunks.
3364
+ When the stream is finished, the object is valid JSON that can be parsed.
3179
3365
  */
3180
- pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & {
3181
- data?: StreamData;
3182
- getErrorMessage?: (error: unknown) => string;
3183
- } & DataStreamOptions): void;
3366
+ readonly textStream: AsyncIterableStream<string>;
3367
+ /**
3368
+ Stream of different types of events, including partial objects, errors, and finish events.
3369
+ Only errors that stop the stream, such as network errors, are thrown.
3370
+ */
3371
+ readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
3184
3372
  /**
3185
3373
  Writes text delta output to a Node.js response-like object.
3186
3374
  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
@@ -3189,27 +3377,10 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3189
3377
  @param response A Node.js response-like object (ServerResponse).
3190
3378
  @param init Optional headers, status code, and status text.
3191
3379
  */
3192
- pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
3193
- /**
3194
- Converts the result to a streamed response object with a stream data part stream.
3195
- It can be used with the `useChat` and `useCompletion` hooks.
3196
-
3197
- @param options.status The status code.
3198
- @param options.statusText The status text.
3199
- @param options.headers The headers.
3200
- @param options.data The stream data.
3201
- @param options.getErrorMessage An optional function that converts an error to an error message.
3202
- @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3203
- @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3204
-
3205
- @return A response object.
3206
- */
3207
- toDataStreamResponse(options?: ResponseInit & {
3208
- data?: StreamData;
3209
- getErrorMessage?: (error: unknown) => string;
3210
- } & DataStreamOptions): Response;
3380
+ pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
3211
3381
  /**
3212
3382
  Creates a simple text stream response.
3383
+ The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
3213
3384
  Each text delta is encoded as UTF-8 and sent as a separate chunk.
3214
3385
  Non-text-delta events are ignored.
3215
3386
 
@@ -3217,294 +3388,274 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3217
3388
  */
3218
3389
  toTextStreamResponse(init?: ResponseInit): Response;
3219
3390
  }
3220
- type TextStreamPart<TOOLS extends ToolSet> = {
3221
- type: 'text-delta';
3222
- textDelta: string;
3391
+ type ObjectStreamPart<PARTIAL> = {
3392
+ type: 'object';
3393
+ object: PARTIAL;
3223
3394
  } | {
3224
- type: 'reasoning';
3395
+ type: 'text-delta';
3225
3396
  textDelta: string;
3226
3397
  } | {
3227
- type: 'reasoning-signature';
3228
- signature: string;
3229
- } | {
3230
- type: 'redacted-reasoning';
3231
- data: string;
3232
- } | {
3233
- type: 'source';
3234
- source: Source;
3235
- } | ({
3236
- type: 'tool-call';
3237
- } & ToolCallUnion<TOOLS>) | {
3238
- type: 'tool-call-streaming-start';
3239
- toolCallId: string;
3240
- toolName: string;
3241
- } | {
3242
- type: 'tool-call-delta';
3243
- toolCallId: string;
3244
- toolName: string;
3245
- argsTextDelta: string;
3246
- } | ({
3247
- type: 'tool-result';
3248
- } & ToolResultUnion<TOOLS>) | {
3249
- type: 'step-start';
3250
- messageId: string;
3251
- request: LanguageModelRequestMetadata;
3252
- warnings: CallWarning[];
3253
- } | {
3254
- type: 'step-finish';
3255
- messageId: string;
3256
- logprobs?: LogProbs;
3257
- request: LanguageModelRequestMetadata;
3258
- warnings: CallWarning[] | undefined;
3259
- response: LanguageModelResponseMetadata;
3260
- usage: LanguageModelUsage;
3261
- finishReason: FinishReason;
3262
- providerMetadata: ProviderMetadata | undefined;
3263
- /**
3264
- * @deprecated Use `providerMetadata` instead.
3265
- */
3266
- experimental_providerMetadata?: ProviderMetadata;
3267
- isContinued: boolean;
3398
+ type: 'error';
3399
+ error: unknown;
3268
3400
  } | {
3269
3401
  type: 'finish';
3270
3402
  finishReason: FinishReason;
3271
- usage: LanguageModelUsage;
3272
- providerMetadata: ProviderMetadata | undefined;
3273
- /**
3274
- * @deprecated Use `providerMetadata` instead.
3275
- */
3276
- experimental_providerMetadata?: ProviderMetadata;
3277
- /**
3278
- * @deprecated will be moved into provider metadata
3279
- */
3280
3403
  logprobs?: LogProbs;
3281
- /**
3282
- * @deprecated use response on step-finish instead
3283
- */
3404
+ usage: LanguageModelUsage;
3284
3405
  response: LanguageModelResponseMetadata;
3285
- } | {
3286
- type: 'error';
3287
- error: unknown;
3406
+ providerMetadata?: ProviderMetadata;
3288
3407
  };
3289
3408
 
3290
- /**
3291
- * Smooths text streaming output.
3292
- *
3293
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
3294
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
3295
- *
3296
- * @returns A transform stream that smooths text streaming output.
3297
- */
3298
- declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
3299
- delayInMs?: number | null;
3300
- chunking?: 'word' | 'line' | RegExp;
3301
- /**
3302
- * Internal. For test use only. May change without notice.
3303
- */
3304
- _internal?: {
3305
- delay?: (delayInMs: number | null) => Promise<void>;
3306
- };
3307
- }): (options: {
3308
- tools: TOOLS;
3309
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
3310
-
3311
- /**
3312
- A transformation that is applied to the stream.
3313
-
3314
- @param stopStream - A function that stops the source stream.
3315
- @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
3316
- */
3317
- type StreamTextTransform<TOOLS extends ToolSet> = (options: {
3318
- tools: TOOLS;
3319
- stopStream: () => void;
3320
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
3321
3409
  /**
3322
3410
  Callback that is set using the `onError` option.
3323
3411
 
3324
3412
  @param event - The event that is passed to the callback.
3325
3413
  */
3326
- type StreamTextOnErrorCallback = (event: {
3414
+ type StreamObjectOnErrorCallback = (event: {
3327
3415
  error: unknown;
3328
3416
  }) => Promise<void> | void;
3329
3417
  /**
3330
- Callback that is set using the `onStepFinish` option.
3331
-
3332
- @param stepResult - The result of the step.
3333
- */
3334
- type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
3335
- /**
3336
- Callback that is set using the `onChunk` option.
3418
+ Callback that is set using the `onFinish` option.
3337
3419
 
3338
3420
  @param event - The event that is passed to the callback.
3339
3421
  */
3340
- type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
3341
- chunk: Extract<TextStreamPart<TOOLS>, {
3342
- type: 'text-delta' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
3343
- }>;
3422
+ type StreamObjectOnFinishCallback<RESULT> = (event: {
3423
+ /**
3424
+ The token usage of the generated response.
3425
+ */
3426
+ usage: LanguageModelUsage;
3427
+ /**
3428
+ The generated object. Can be undefined if the final object does not match the schema.
3429
+ */
3430
+ object: RESULT | undefined;
3431
+ /**
3432
+ Optional error object, e.g. a TypeValidationError when the final object does not match the schema.
3433
+ */
3434
+ error: unknown | undefined;
3435
+ /**
3436
+ Response metadata.
3437
+ */
3438
+ response: LanguageModelResponseMetadata;
3439
+ /**
3440
+ Warnings from the model provider (e.g. unsupported settings).
3441
+ */
3442
+ warnings?: CallWarning[];
3443
+ /**
3444
+ Additional provider-specific metadata. They are passed through
3445
+ to the provider from the AI SDK and enable provider-specific
3446
+ functionality that can be fully encapsulated in the provider.
3447
+ */
3448
+ providerMetadata: ProviderMetadata | undefined;
3449
+ /**
3450
+ @deprecated Use `providerMetadata` instead.
3451
+ */
3452
+ experimental_providerMetadata?: ProviderMetadata;
3344
3453
  }) => Promise<void> | void;
3345
3454
  /**
3346
- Callback that is set using the `onFinish` option.
3455
+ Generate a structured, typed object for a given prompt and schema using a language model.
3347
3456
 
3348
- @param event - The event that is passed to the callback.
3457
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3458
+
3459
+ @return
3460
+ A result object for accessing the partial object stream and additional information.
3349
3461
  */
3350
- type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
3462
+ declare function streamObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3463
+ output?: 'object' | undefined;
3464
+ /**
3465
+ The language model to use.
3466
+ */
3467
+ model: LanguageModel;
3351
3468
  /**
3352
- Details for all steps.
3469
+ The schema of the object that the model should generate.
3470
+ */
3471
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
3472
+ /**
3473
+ Optional name of the output that should be generated.
3474
+ Used by some providers for additional LLM guidance, e.g.
3475
+ via tool or schema name.
3353
3476
  */
3354
- readonly steps: StepResult<TOOLS>[];
3355
- }) => Promise<void> | void;
3356
- /**
3357
- Generate a text and call tools for a given prompt using a language model.
3358
-
3359
- This function streams the output. If you do not want to stream the output, use `generateText` instead.
3360
-
3361
- @param model - The language model to use.
3362
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
3477
+ schemaName?: string;
3478
+ /**
3479
+ Optional description of the output that should be generated.
3480
+ Used by some providers for additional LLM guidance, e.g.
3481
+ via tool or schema description.
3482
+ */
3483
+ schemaDescription?: string;
3484
+ /**
3485
+ The mode to use for object generation.
3363
3486
 
3364
- @param system - A system message that will be part of the prompt.
3365
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
3366
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
3487
+ The schema is converted into a JSON schema and used in one of the following ways:
3367
3488
 
3368
- @param maxTokens - Maximum number of tokens to generate.
3369
- @param temperature - Temperature setting.
3370
- The value is passed through to the provider. The range depends on the provider and model.
3371
- It is recommended to set either `temperature` or `topP`, but not both.
3372
- @param topP - Nucleus sampling.
3373
- The value is passed through to the provider. The range depends on the provider and model.
3374
- It is recommended to set either `temperature` or `topP`, but not both.
3375
- @param topK - Only sample from the top K options for each subsequent token.
3376
- Used to remove "long tail" low probability responses.
3377
- Recommended for advanced use cases only. You usually only need to use temperature.
3378
- @param presencePenalty - Presence penalty setting.
3379
- It affects the likelihood of the model to repeat information that is already in the prompt.
3380
- The value is passed through to the provider. The range depends on the provider and model.
3381
- @param frequencyPenalty - Frequency penalty setting.
3382
- It affects the likelihood of the model to repeatedly use the same words or phrases.
3383
- The value is passed through to the provider. The range depends on the provider and model.
3384
- @param stopSequences - Stop sequences.
3385
- If set, the model will stop generating text when one of the stop sequences is generated.
3386
- @param seed - The seed (integer) to use for random sampling.
3387
- If set and supported by the model, calls will generate deterministic results.
3489
+ - 'auto': The provider will choose the best mode for the model.
3490
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3491
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3388
3492
 
3389
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3390
- @param abortSignal - An optional abort signal that can be used to cancel the call.
3391
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3493
+ Please note that most providers do not support all modes.
3392
3494
 
3393
- @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
3394
- @param experimental_generateMessageId - Generate a unique ID for each message.
3495
+ Default and recommended: 'auto' (best mode for the model).
3496
+ */
3497
+ mode?: 'auto' | 'json' | 'tool';
3498
+ /**
3499
+ Optional telemetry configuration (experimental).
3500
+ */
3501
+ experimental_telemetry?: TelemetrySettings;
3502
+ /**
3503
+ Additional provider-specific options. They are passed through
3504
+ to the provider from the AI SDK and enable provider-specific
3505
+ functionality that can be fully encapsulated in the provider.
3506
+ */
3507
+ providerOptions?: ProviderOptions;
3508
+ /**
3509
+ @deprecated Use `providerOptions` instead.
3510
+ */
3511
+ experimental_providerMetadata?: ProviderMetadata;
3512
+ /**
3513
+ Callback that is invoked when an error occurs during streaming.
3514
+ You can use it to log errors.
3515
+ The stream processing will pause until the callback promise is resolved.
3516
+ */
3517
+ onError?: StreamObjectOnErrorCallback;
3518
+ /**
3519
+ Callback that is called when the LLM response and the final object validation are finished.
3520
+ */
3521
+ onFinish?: StreamObjectOnFinishCallback<OBJECT>;
3522
+ /**
3523
+ * Internal. For test use only. May change without notice.
3524
+ */
3525
+ _internal?: {
3526
+ generateId?: () => string;
3527
+ currentDate?: () => Date;
3528
+ now?: () => number;
3529
+ };
3530
+ }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
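An illustrative consumer of the partial object stream; note that `streamObject` returns synchronously and the promises on the result resolve as the stream finishes (setup assumed):

import { streamObject, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumed to be configured elsewhere

const result = streamObject({
  model,
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: 'Suggest a blog post about TypeScript.',
  onError({ error }) {
    console.error(error); // streaming errors arrive here instead of throwing
  },
});

for await (const partial of result.partialObjectStream) {
  console.log(partial); // deep-partial, unvalidated snapshots
}
const article = await result.object; // rejects if final validation fails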
3531
+ /**
3532
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
3395
3533
 
3396
- @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
3397
- @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
3398
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
3399
- @param onFinish - Callback that is called when the LLM response and all request tool executions
3400
- (for tools that have an `execute` function) are finished.
3534
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3401
3535
 
3402
3536
  @return
3403
- A result object for accessing different stream types and additional information.
3537
+ A result object for accessing the partial object stream and additional information.
3404
3538
  */
3405
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3539
+ declare function streamObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3540
+ output: 'array';
3406
3541
  /**
3407
3542
  The language model to use.
3408
- */
3543
+ */
3409
3544
  model: LanguageModel;
3410
3545
  /**
3411
- The tools that the model can call. The model needs to support calling tools.
3412
- */
3413
- tools?: TOOLS;
3546
+ The element schema of the array that the model should generate.
3547
+ */
3548
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
3414
3549
  /**
3415
- The tool choice strategy. Default: 'auto'.
3416
- */
3417
- toolChoice?: ToolChoice<TOOLS>;
3550
+ Optional name of the array that should be generated.
3551
+ Used by some providers for additional LLM guidance, e.g.
3552
+ via tool or schema name.
3553
+ */
3554
+ schemaName?: string;
3418
3555
  /**
3419
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
3556
+ Optional description of the array that should be generated.
3557
+ Used by some providers for additional LLM guidance, e.g.
3558
+ via tool or schema description.
3559
+ */
3560
+ schemaDescription?: string;
3561
+ /**
3562
+ The mode to use for object generation.
3420
3563
 
3421
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
3564
+ The schema is converted into a JSON schema and used in one of the following ways:
3422
3565
 
3423
- By default, it's set to 1, which means that only a single LLM call is made.
3424
- */
3425
- maxSteps?: number;
3426
- /**
3427
- Generate a unique ID for each message.
3428
- */
3429
- experimental_generateMessageId?: IDGenerator;
3430
- /**
3431
- When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
3566
+ - 'auto': The provider will choose the best mode for the model.
3567
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3568
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3432
3569
 
3433
- By default, it's set to false.
3434
- */
3435
- experimental_continueSteps?: boolean;
3570
+ Please note that most providers do not support all modes.
3571
+
3572
+ Default and recommended: 'auto' (best mode for the model).
3573
+ */
3574
+ mode?: 'auto' | 'json' | 'tool';
3436
3575
  /**
3437
3576
  Optional telemetry configuration (experimental).
3438
- */
3577
+ */
3439
3578
  experimental_telemetry?: TelemetrySettings;
3440
3579
  /**
3441
3580
  Additional provider-specific options. They are passed through
3442
3581
  to the provider from the AI SDK and enable provider-specific
3443
3582
  functionality that can be fully encapsulated in the provider.
3444
- */
3583
+ */
3445
3584
  providerOptions?: ProviderOptions;
3446
3585
  /**
3447
3586
  @deprecated Use `providerOptions` instead.
3448
- */
3587
+ */
3449
3588
  experimental_providerMetadata?: ProviderMetadata;
3450
3589
  /**
3451
- Limits the tools that are available for the model to call without
3452
- changing the tool call and result types in the result.
3453
- */
3454
- experimental_activeTools?: Array<keyof TOOLS>;
3590
+ Callback that is invoked when an error occurs during streaming.
3591
+ You can use it to log errors.
3592
+ The stream processing will pause until the callback promise is resolved.
3593
+ */
3594
+ onError?: StreamObjectOnErrorCallback;
3455
3595
  /**
3456
- Optional specification for parsing structured outputs from the LLM response.
3457
- */
3458
- experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
3596
+ Callback that is called when the LLM response and the final object validation are finished.
3597
+ */
3598
+ onFinish?: StreamObjectOnFinishCallback<Array<ELEMENT>>;
3459
3599
  /**
3460
- A function that attempts to repair a tool call that failed to parse.
3600
+ * Internal. For test use only. May change without notice.
3461
3601
  */
3462
- experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
3602
+ _internal?: {
3603
+ generateId?: () => string;
3604
+ currentDate?: () => Date;
3605
+ now?: () => number;
3606
+ };
3607
+ }): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
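An illustrative array-strategy stream; `elementStream` yields each element once it is complete (setup assumed):

import { streamObject, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumed to be configured elsewhere

const { elementStream } = streamObject({
  model,
  output: 'array',
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: 'List five capital cities.',
});

for await (const element of elementStream) {
  console.log(element); // one fully streamed array element at a time
}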
3608
+ /**
3609
+ Generate JSON with any schema for a given prompt using a language model.
3610
+
3611
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3612
+
3613
+ @return
3614
+ A result object for accessing the partial object stream and additional information.
3615
+ */
3616
+ declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
3617
+ output: 'no-schema';
3463
3618
  /**
3464
- Enable streaming of tool call deltas as they are generated. Disabled by default.
3465
- */
3466
- toolCallStreaming?: boolean;
3619
+ The language model to use.
3620
+ */
3621
+ model: LanguageModel;
3467
3622
  /**
3468
- @deprecated Use `toolCallStreaming` instead.
3469
- */
3470
- experimental_toolCallStreaming?: boolean;
3623
+ The mode to use for object generation. Must be "json" for no-schema output.
3624
+ */
3625
+ mode?: 'json';
3471
3626
  /**
3472
- Optional stream transformations.
3473
- They are applied in the order they are provided.
3474
- The stream transformations must maintain the stream structure for streamText to work correctly.
3475
- */
3476
- experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
3627
+ Optional telemetry configuration (experimental).
3628
+ */
3629
+ experimental_telemetry?: TelemetrySettings;
3477
3630
  /**
3478
- Callback that is called for each chunk of the stream.
3479
- The stream processing will pause until the callback promise is resolved.
3480
- */
3481
- onChunk?: StreamTextOnChunkCallback<TOOLS>;
3631
+ Additional provider-specific options. They are passed through
3632
+ to the provider from the AI SDK and enable provider-specific
3633
+ functionality that can be fully encapsulated in the provider.
3634
+ */
3635
+ providerOptions?: ProviderOptions;
3636
+ /**
3637
+ @deprecated Use `providerOptions` instead.
3638
+ */
3639
+ experimental_providerMetadata?: ProviderMetadata;
3482
3640
  /**
3483
3641
  Callback that is invoked when an error occurs during streaming.
3484
3642
  You can use it to log errors.
3485
3643
  The stream processing will pause until the callback promise is resolved.
3486
- */
3487
- onError?: StreamTextOnErrorCallback;
3488
- /**
3489
- Callback that is called when the LLM response and all request tool executions
3490
- (for tools that have an `execute` function) are finished.
3491
-
3492
- The usage is the combined usage of all steps.
3493
- */
3494
- onFinish?: StreamTextOnFinishCallback<TOOLS>;
3644
+ */
3645
+ onError?: StreamObjectOnErrorCallback;
3495
3646
  /**
3496
- Callback that is called when each step (LLM call) is finished, including intermediate steps.
3497
- */
3498
- onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
3647
+ Callback that is called when the LLM response and the final object validation are finished.
3648
+ */
3649
+ onFinish?: StreamObjectOnFinishCallback<JSONValue>;
3499
3650
  /**
3500
- Internal. For test use only. May change without notice.
3651
+ * Internal. For test use only. May change without notice.
3501
3652
  */
3502
3653
  _internal?: {
3503
- now?: () => number;
3504
- generateId?: IDGenerator;
3654
+ generateId?: () => string;
3505
3655
  currentDate?: () => Date;
3656
+ now?: () => number;
3506
3657
  };
3507
- }): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
3658
+ }): StreamObjectResult<JSONValue, JSONValue, never>;
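An illustrative no-schema stream consumed through `textStream` (setup assumed):

import { streamObject, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed to be configured elsewhere

const { textStream } = streamObject({
  model,
  output: 'no-schema',
  prompt: 'Generate arbitrary JSON describing a product.',
});

for await (const chunk of textStream) {
  process.stdout.write(chunk); // valid JSON once the stream has finished
}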
3508
3659
 
3509
3660
  /**
3510
3661
  * Experimental middleware for LanguageModelV1.
@@ -3731,7 +3882,9 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
3731
3882
  } | {
3732
3883
  type: 'redacted-reasoning';
3733
3884
  data: string;
3734
- } | {
3885
+ } | ({
3886
+ type: 'file';
3887
+ } & GeneratedFile) | {
3735
3888
  type: 'source';
3736
3889
  source: Source;
3737
3890
  } | ({
@@ -4083,4 +4236,4 @@ declare namespace llamaindexAdapter {
4083
4236
  };
4084
4237
  }
4085
4238
 
4086
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
4239
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };