ai 6.0.0-beta.98 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,37 +1,35 @@
1
- export { createGateway, gateway } from '@ai-sdk/gateway';
1
+ import { GatewayModelId } from '@ai-sdk/gateway';
2
+ export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
2
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
- import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
4
- export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
4
+ import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
5
+ export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
5
6
  import * as _ai_sdk_provider from '@ai-sdk/provider';
6
- import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, EmbeddingModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV2, TranscriptionModelV3CallWarning, LanguageModelV3Usage, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, JSONObject, SharedV3Warning, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
7
+ import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
7
8
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
8
- import { ServerResponse } from 'node:http';
9
9
  import { AttributeValue, Tracer } from '@opentelemetry/api';
10
+ import { ServerResponse } from 'node:http';
10
11
  import { ServerResponse as ServerResponse$1 } from 'http';
11
12
  import { z } from 'zod/v4';
12
13
 
13
14
  /**
14
- Embedding model that is used by the AI SDK Core functions.
15
+ Embedding model that is used by the AI SDK.
15
16
  */
16
- type EmbeddingModel<VALUE = string> = string | EmbeddingModelV3<VALUE> | EmbeddingModelV2<VALUE>;
17
+ type EmbeddingModel = string | EmbeddingModelV3 | EmbeddingModelV2<string>;
17
18
  /**
18
19
  Embedding.
19
20
  */
20
21
  type Embedding = EmbeddingModelV3Embedding;
21
22
 
23
+ type EmbeddingModelMiddleware = EmbeddingModelV3Middleware;
24
+
22
25
  /**
23
- Image model that is used by the AI SDK Core functions.
24
- */
25
- type ImageModel = ImageModelV3;
26
- /**
27
- Warning from the model provider for this call. The call will proceed, but e.g.
28
- some settings might not be supported, which can lead to suboptimal results.
26
+ Image model that is used by the AI SDK.
29
27
  */
30
- type ImageGenerationWarning = ImageModelV3CallWarning;
28
+ type ImageModel = string | ImageModelV3 | ImageModelV2;
31
29
  /**
32
30
  Metadata from the model provider for this call
33
31
  */
34
- type ImageModelProviderMetadata = ImageModelV3ProviderMetadata;
32
+ type ImageModelProviderMetadata = ImageModelV3ProviderMetadata | ImageModelV2ProviderMetadata;
35
33
 
36
34
  type ImageModelResponseMetadata = {
37
35
  /**
@@ -50,10 +48,50 @@ type ImageModelResponseMetadata = {
50
48
 
51
49
  type JSONValue = JSONValue$1;
52
50
 
51
+ declare global {
52
+ /**
53
+ * Global interface that can be augmented by third-party packages to register custom model IDs.
54
+ *
55
+ * You can register model IDs in two ways:
56
+ *
57
+ * 1. Register based on Model IDs from a provider package:
58
+ * @example
59
+ * ```typescript
60
+ * import { openai } from '@ai-sdk/openai';
61
+ * type OpenAIResponsesModelId = Parameters<typeof openai>[0];
62
+ *
63
+ * declare global {
64
+ * interface RegisteredProviderModels {
65
+ * openai: OpenAIResponsesModelId;
66
+ * }
67
+ * }
68
+ * ```
69
+ *
70
+ * 2. Register individual model IDs directly as keys:
71
+ * @example
72
+ * ```typescript
73
+ * declare global {
74
+ * interface RegisteredProviderModels {
75
+ * 'my-provider:my-model': any;
76
+ * 'my-provider:another-model': any;
77
+ * }
78
+ * }
79
+ * ```
80
+ */
81
+ interface RegisteredProviderModels {
82
+ }
83
+ }
84
+ /**
85
+ * Global provider model ID type that defaults to GatewayModelId but can be augmented
86
+ * by third-party packages via declaration merging.
87
+ */
88
+ type GlobalProviderModelId = [keyof RegisteredProviderModels] extends [
89
+ never
90
+ ] ? GatewayModelId : keyof RegisteredProviderModels | RegisteredProviderModels[keyof RegisteredProviderModels];
53
91
  /**
54
- Language model that is used by the AI SDK Core functions.
92
+ Language model that is used by the AI SDK.
55
93
  */
56
- type LanguageModel = string | LanguageModelV3 | LanguageModelV2;
94
+ type LanguageModel = GlobalProviderModelId | LanguageModelV3 | LanguageModelV2;
57
95
  /**
58
96
  Reason why a language model finished generating a response.
59
97
 
@@ -65,12 +103,12 @@ Can be one of the following:
65
103
  - `error`: model stopped because of an error
66
104
  - `other`: model stopped for other reasons
67
105
  */
68
- type FinishReason = LanguageModelV3FinishReason;
106
+ type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
69
107
  /**
70
108
  Warning from the model provider for this call. The call will proceed, but e.g.
71
109
  some settings might not be supported, which can lead to suboptimal results.
72
110
  */
73
- type CallWarning = LanguageModelV3CallWarning;
111
+ type CallWarning = SharedV3Warning;
74
112
  /**
75
113
  A source that has been used as input to generate the response.
76
114
  */
@@ -90,8 +128,6 @@ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'requ
90
128
 
91
129
  type LanguageModelMiddleware = LanguageModelV3Middleware;
92
130
 
93
- type EmbeddingModelMiddleware = EmbeddingModelV3Middleware;
94
-
95
131
  type LanguageModelRequestMetadata = {
96
132
  /**
97
133
  Request HTTP body that was sent to the provider API.
@@ -119,7 +155,7 @@ type LanguageModelResponseMetadata = {
119
155
  };
120
156
 
121
157
  /**
122
- * Reranking model that is used by the AI SDK Core functions.
158
+ * Reranking model that is used by the AI SDK.
123
159
  */
124
160
  type RerankingModel = RerankingModelV3;
125
161
 
@@ -148,7 +184,7 @@ type Provider = {
148
184
 
149
185
  @throws {NoSuchModelError} If no such model exists.
150
186
  */
151
- textEmbeddingModel(modelId: string): EmbeddingModel<string>;
187
+ embeddingModel(modelId: string): EmbeddingModel;
152
188
  /**
153
189
  Returns the image model with the given id.
154
190
  The model id is then passed to the provider function to get the model.
@@ -180,14 +216,9 @@ fully encapsulated in the provider.
180
216
  type ProviderMetadata = SharedV3ProviderMetadata;
181
217
 
182
218
  /**
183
- Speech model that is used by the AI SDK Core functions.
219
+ Speech model that is used by the AI SDK.
184
220
  */
185
221
  type SpeechModel = string | SpeechModelV3 | SpeechModelV2;
186
- /**
187
- Warning from the model provider for this call. The call will proceed, but e.g.
188
- some settings might not be supported, which can lead to suboptimal results.
189
- */
190
- type SpeechWarning = SpeechModelV3CallWarning;
191
222
 
192
223
  type SpeechModelResponseMetadata = {
193
224
  /**
@@ -209,14 +240,9 @@ type SpeechModelResponseMetadata = {
209
240
  };
210
241
 
211
242
  /**
212
- Transcription model that is used by the AI SDK Core functions.
243
+ Transcription model that is used by the AI SDK.
213
244
  */
214
245
  type TranscriptionModel = string | TranscriptionModelV3 | TranscriptionModelV2;
215
- /**
216
- Warning from the model provider for this call. The call will proceed, but e.g.
217
- some settings might not be supported, which can lead to suboptimal results.
218
- */
219
- type TranscriptionWarning = TranscriptionModelV3CallWarning;
220
246
 
221
247
  type TranscriptionModelResponseMetadata = {
222
248
  /**
@@ -234,9 +260,67 @@ type TranscriptionModelResponseMetadata = {
234
260
  };
235
261
 
236
262
  /**
237
- Represents the number of tokens used in a prompt and completion.
263
+ * Represents the number of tokens used in a prompt and completion.
238
264
  */
239
- type LanguageModelUsage = LanguageModelV3Usage;
265
+ type LanguageModelUsage = {
266
+ /**
267
+ * The total number of input (prompt) tokens used.
268
+ */
269
+ inputTokens: number | undefined;
270
+ /**
271
+ * Detailed information about the input tokens.
272
+ */
273
+ inputTokenDetails: {
274
+ /**
275
+ * The number of non-cached input (prompt) tokens used.
276
+ */
277
+ noCacheTokens: number | undefined;
278
+ /**
279
+ * The number of cached input (prompt) tokens read.
280
+ */
281
+ cacheReadTokens: number | undefined;
282
+ /**
283
+ * The number of cached input (prompt) tokens written.
284
+ */
285
+ cacheWriteTokens: number | undefined;
286
+ };
287
+ /**
288
+ * The number of total output (completion) tokens used.
289
+ */
290
+ outputTokens: number | undefined;
291
+ /**
292
+ * Detailed information about the output tokens.
293
+ */
294
+ outputTokenDetails: {
295
+ /**
296
+ * The number of text tokens used.
297
+ */
298
+ textTokens: number | undefined;
299
+ /**
300
+ * The number of reasoning tokens used.
301
+ */
302
+ reasoningTokens: number | undefined;
303
+ };
304
+ /**
305
+ * The total number of tokens used.
306
+ */
307
+ totalTokens: number | undefined;
308
+ /**
309
+ * @deprecated Use outputTokenDetails.reasoningTokens instead.
310
+ */
311
+ reasoningTokens?: number | undefined;
312
+ /**
313
+ * @deprecated Use inputTokenDetails.cacheReadTokens instead.
314
+ */
315
+ cachedInputTokens?: number | undefined;
316
+ /**
317
+ * Raw usage information from the provider.
318
+ *
319
+ * This is the usage information in the shape that the provider returns.
320
+ * It can include additional information that is not part of the standard usage information.
321
+ */
322
+ raw?: JSONObject;
323
+ };
240
324
  /**
241
325
  Represents the number of tokens used in an embedding.
242
326
  */
@@ -251,6 +335,12 @@ Usage information for an image model call.
251
335
  */
252
336
  type ImageModelUsage = ImageModelV3Usage;
253
337
 
338
+ /**
339
+ Warning from the model provider for this call. The call will proceed, but e.g.
340
+ some settings might not be supported, which can lead to suboptimal results.
341
+ */
342
+ type Warning = SharedV3Warning;
343
+
254
344
  /**
255
345
  * A generated file.
256
346
  */
@@ -390,6 +480,7 @@ type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
390
480
  input: InferToolInput<TOOLS[NAME]>;
391
481
  error: unknown;
392
482
  providerExecuted?: boolean;
483
+ providerMetadata?: ProviderMetadata;
393
484
  dynamic?: false | undefined;
394
485
  title?: string;
395
486
  };
@@ -401,6 +492,7 @@ type DynamicToolError = {
401
492
  input: unknown;
402
493
  error: unknown;
403
494
  providerExecuted?: boolean;
495
+ providerMetadata?: ProviderMetadata;
404
496
  dynamic: true;
405
497
  title?: string;
406
498
  };
@@ -414,6 +506,7 @@ type StaticToolResult<TOOLS extends ToolSet> = ValueOf<{
414
506
  input: InferToolInput<TOOLS[NAME]>;
415
507
  output: InferToolOutput<TOOLS[NAME]>;
416
508
  providerExecuted?: boolean;
509
+ providerMetadata?: ProviderMetadata;
417
510
  dynamic?: false | undefined;
418
511
  preliminary?: boolean;
419
512
  title?: string;
@@ -426,6 +519,7 @@ type DynamicToolResult = {
426
519
  input: unknown;
427
520
  output: unknown;
428
521
  providerExecuted?: boolean;
522
+ providerMetadata?: ProviderMetadata;
429
523
  dynamic: true;
430
524
  preliminary?: boolean;
431
525
  title?: string;
@@ -509,41 +603,91 @@ declare const text: () => Output<string, string>;
509
603
  * When the model generates a text response, it will return an object that matches the schema.
510
604
  *
511
605
  * @param schema - The schema of the object to generate.
606
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
607
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
512
608
  *
513
609
  * @returns An output specification for generating objects with the specified schema.
514
610
  */
515
- declare const object: <OBJECT>({ schema: inputSchema, }: {
611
+ declare const object: <OBJECT>({ schema: inputSchema, name, description, }: {
516
612
  schema: FlexibleSchema<OBJECT>;
613
+ /**
614
+ * Optional name of the output that should be generated.
615
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
616
+ */
617
+ name?: string;
618
+ /**
619
+ * Optional description of the output that should be generated.
620
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
621
+ */
622
+ description?: string;
517
623
  }) => Output<OBJECT, DeepPartial<OBJECT>>;
518
624
  /**
519
625
  * Output specification for array generation.
520
626
  * When the model generates a text response, it will return an array of elements.
521
627
  *
522
628
  * @param element - The schema of the array elements to generate.
629
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
630
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
523
631
  *
524
632
  * @returns An output specification for generating an array of elements.
525
633
  */
526
- declare const array: <ELEMENT>({ element: inputElementSchema, }: {
634
+ declare const array: <ELEMENT>({ element: inputElementSchema, name, description, }: {
527
635
  element: FlexibleSchema<ELEMENT>;
636
+ /**
637
+ * Optional name of the output that should be generated.
638
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
639
+ */
640
+ name?: string;
641
+ /**
642
+ * Optional description of the output that should be generated.
643
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
644
+ */
645
+ description?: string;
528
646
  }) => Output<Array<ELEMENT>, Array<ELEMENT>>;
529
647
  /**
530
648
  * Output specification for choice generation.
531
649
  * When the model generates a text response, it will return one of the choice options.
532
650
  *
533
651
  * @param options - The available choices.
652
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
653
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
534
654
  *
535
655
  * @returns An output specification for generating a choice.
536
656
  */
537
- declare const choice: <CHOICE extends string>({ options: choiceOptions, }: {
657
+ declare const choice: <CHOICE extends string>({ options: choiceOptions, name, description, }: {
538
658
  options: Array<CHOICE>;
659
+ /**
660
+ * Optional name of the output that should be generated.
661
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
662
+ */
663
+ name?: string;
664
+ /**
665
+ * Optional description of the output that should be generated.
666
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
667
+ */
668
+ description?: string;
539
669
  }) => Output<CHOICE, CHOICE>;
540
670
  /**
541
671
  * Output specification for unstructured JSON generation.
542
672
  * When the model generates a text response, it will return a JSON object.
543
673
  *
674
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
675
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
676
+ *
544
677
  * @returns An output specification for generating JSON.
545
678
  */
546
- declare const json: () => Output<JSONValue$1, JSONValue$1>;
679
+ declare const json: ({ name, description, }?: {
680
+ /**
681
+ * Optional name of the output that should be generated.
682
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
683
+ */
684
+ name?: string;
685
+ /**
686
+ * Optional description of the output that should be generated.
687
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
688
+ */
689
+ description?: string;
690
+ }) => Output<JSONValue$1, JSONValue$1>;
547
691
 
548
692
  type output_Output<OUTPUT = any, PARTIAL = any> = Output<OUTPUT, PARTIAL>;
549
693
  declare const output_array: typeof array;
@@ -630,10 +774,14 @@ type StepResult<TOOLS extends ToolSet> = {
630
774
  */
631
775
  readonly dynamicToolResults: Array<DynamicToolResult>;
632
776
  /**
633
- The reason why the generation finished.
634
- */
777
+ * The unified reason why the generation finished.
778
+ */
635
779
  readonly finishReason: FinishReason;
636
780
  /**
781
+ * The raw reason why the generation finished (from the provider).
782
+ */
783
+ readonly rawFinishReason: string | undefined;
784
+ /**
637
785
  The token usage of the generated text.
638
786
  */
639
787
  readonly usage: LanguageModelUsage;
@@ -724,10 +872,14 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
724
872
  */
725
873
  readonly dynamicToolResults: Array<DynamicToolResult>;
726
874
  /**
727
- The reason why the generation finished.
875
+ * The unified reason why the generation finished.
728
876
  */
729
877
  readonly finishReason: FinishReason;
730
878
  /**
879
+ * The raw reason why the generation finished (from the provider).
880
+ */
881
+ readonly rawFinishReason: string | undefined;
882
+ /**
731
883
  The token usage of the last step.
732
884
  */
733
885
  readonly usage: LanguageModelUsage;
@@ -866,7 +1018,7 @@ type Prompt = {
866
1018
  /**
867
1019
  System message to include in the prompt. Can be used with `prompt` or `messages`.
868
1020
  */
869
- system?: string;
1021
+ system?: string | SystemModelMessage | Array<SystemModelMessage>;
870
1022
  } & ({
871
1023
  /**
872
1024
  A prompt. It can be either a text prompt or a list of messages.
@@ -957,28 +1109,81 @@ type DownloadFunction = (options: Array<{
957
1109
  } | null>>;
958
1110
 
959
1111
  /**
960
- Function that you can use to provide different settings for a step.
961
-
962
- @param options - The options for the step.
963
- @param options.steps - The steps that have been executed so far.
964
- @param options.stepNumber - The number of the step that is being executed.
965
- @param options.model - The model that is being used.
966
-
967
- @returns An object that contains the settings for the step.
968
- If you return undefined (or for undefined settings), the settings from the outer level will be used.
969
- */
1112
+ * Function that you can use to provide different settings for a step.
1113
+ *
1114
+ * @param options - The options for the step.
1115
+ * @param options.steps - The steps that have been executed so far.
1116
+ * @param options.stepNumber - The number of the step that is being executed.
1117
+ * @param options.model - The model that is being used.
1118
+ * @param options.messages - The messages that will be sent to the model for the current step.
1119
+ * @param options.experimental_context - The context passed via the experimental_context setting (experimental).
1120
+ *
1121
+ * @returns An object that contains the settings for the step.
1122
+ * If you return undefined (or for undefined settings), the settings from the outer level will be used.
1123
+ */
970
1124
  type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
1125
+ /**
1126
+ * The steps that have been executed so far.
1127
+ */
971
1128
  steps: Array<StepResult<NoInfer<TOOLS>>>;
1129
+ /**
1130
+ * The number of the step that is being executed.
1131
+ */
972
1132
  stepNumber: number;
1133
+ /**
1134
+ * The model instance that is being used for this step.
1135
+ */
973
1136
  model: LanguageModel;
1137
+ /**
1138
+ * The messages that will be sent to the model for the current step.
1139
+ */
974
1140
  messages: Array<ModelMessage>;
1141
+ /**
1142
+ * The context passed via the experimental_context setting (experimental).
1143
+ */
1144
+ experimental_context: unknown;
975
1145
  }) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
1146
+ /**
1147
+ * The result type returned by a {@link PrepareStepFunction},
1148
+ * allowing per-step overrides of model, tools, or messages.
1149
+ */
976
1150
  type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
1151
+ /**
1152
+ * Optionally override which LanguageModel instance is used for this step.
1153
+ */
977
1154
  model?: LanguageModel;
1155
+ /**
1156
+ * Optionally set which tool the model must call, or provide tool call configuration
1157
+ * for this step.
1158
+ */
978
1159
  toolChoice?: ToolChoice<NoInfer<TOOLS>>;
1160
+ /**
1161
+ * If provided, only these tools are enabled/available for this step.
1162
+ */
979
1163
  activeTools?: Array<keyof NoInfer<TOOLS>>;
980
- system?: string;
1164
+ /**
1165
+ * Optionally override the system message(s) sent to the model for this step.
1166
+ */
1167
+ system?: string | SystemModelMessage | Array<SystemModelMessage>;
1168
+ /**
1169
+ * Optionally override the full set of messages sent to the model
1170
+ * for this step.
1171
+ */
981
1172
  messages?: Array<ModelMessage>;
1173
+ /**
1174
+ * Context that is passed into tool execution. Experimental.
1175
+ *
1176
+ * Changing the context will affect the context in this step
1177
+ * and all subsequent steps.
1178
+ */
1179
+ experimental_context?: unknown;
1180
+ /**
1181
+ * Additional provider-specific options for this step.
1182
+ *
1183
+ * Can be used to pass provider-specific configuration such as
1184
+ * container IDs for Anthropic's code execution.
1185
+ */
1186
+ providerOptions?: ProviderOptions;
982
1187
  } | undefined;
983
1188
 
984
1189
  type StopCondition<TOOLS extends ToolSet> = (options: {
@@ -987,9 +1192,9 @@ type StopCondition<TOOLS extends ToolSet> = (options: {
987
1192
  declare function stepCountIs(stepCount: number): StopCondition<any>;
988
1193
  declare function hasToolCall(toolName: string): StopCondition<any>;
989
1194
 
990
- declare const symbol$d: unique symbol;
1195
+ declare const symbol$c: unique symbol;
991
1196
  declare class InvalidToolInputError extends AISDKError {
992
- private readonly [symbol$d];
1197
+ private readonly [symbol$c];
993
1198
  readonly toolName: string;
994
1199
  readonly toolInput: string;
995
1200
  constructor({ toolInput, toolName, cause, message, }: {
@@ -1001,9 +1206,9 @@ declare class InvalidToolInputError extends AISDKError {
1001
1206
  static isInstance(error: unknown): error is InvalidToolInputError;
1002
1207
  }
1003
1208
 
1004
- declare const symbol$c: unique symbol;
1209
+ declare const symbol$b: unique symbol;
1005
1210
  declare class NoSuchToolError extends AISDKError {
1006
- private readonly [symbol$c];
1211
+ private readonly [symbol$b];
1007
1212
  readonly toolName: string;
1008
1213
  readonly availableTools: string[] | undefined;
1009
1214
  constructor({ toolName, availableTools, message, }: {
@@ -1014,51 +1219,11 @@ declare class NoSuchToolError extends AISDKError {
1014
1219
  static isInstance(error: unknown): error is NoSuchToolError;
1015
1220
  }
1016
1221
 
1017
- /**
1018
- @deprecated Use `SystemModelMessage` instead.
1019
- */
1020
- type CoreSystemMessage = SystemModelMessage;
1021
1222
  declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
1022
- /**
1023
- @deprecated Use `systemModelMessageSchema` instead.
1024
- */
1025
- declare const coreSystemMessageSchema: z.ZodType<SystemModelMessage, unknown, z.core.$ZodTypeInternals<SystemModelMessage, unknown>>;
1026
- /**
1027
- @deprecated Use `UserModelMessage` instead.
1028
- */
1029
- type CoreUserMessage = UserModelMessage;
1030
1223
  declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
1031
- /**
1032
- @deprecated Use `userModelMessageSchema` instead.
1033
- */
1034
- declare const coreUserMessageSchema: z.ZodType<UserModelMessage, unknown, z.core.$ZodTypeInternals<UserModelMessage, unknown>>;
1035
- /**
1036
- @deprecated Use `AssistantModelMessage` instead.
1037
- */
1038
- type CoreAssistantMessage = AssistantModelMessage;
1039
1224
  declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
1040
- /**
1041
- @deprecated Use `assistantModelMessageSchema` instead.
1042
- */
1043
- declare const coreAssistantMessageSchema: z.ZodType<AssistantModelMessage, unknown, z.core.$ZodTypeInternals<AssistantModelMessage, unknown>>;
1044
- /**
1045
- @deprecated Use `ToolModelMessage` instead.
1046
- */
1047
- type CoreToolMessage = ToolModelMessage;
1048
1225
  declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
1049
- /**
1050
- @deprecated Use `toolModelMessageSchema` instead.
1051
- */
1052
- declare const coreToolMessageSchema: z.ZodType<ToolModelMessage, unknown, z.core.$ZodTypeInternals<ToolModelMessage, unknown>>;
1053
- /**
1054
- @deprecated Use `ModelMessage` instead.
1055
- */
1056
- type CoreMessage = ModelMessage;
1057
1226
  declare const modelMessageSchema: z.ZodType<ModelMessage>;
1058
- /**
1059
- @deprecated Use `modelMessageSchema` instead.
1060
- */
1061
- declare const coreMessageSchema: z.ZodType<CoreMessage>;
1062
1227
 
1063
1228
  /**
1064
1229
  * A function that attempts to repair a tool call that failed to parse.
@@ -1074,7 +1239,7 @@ declare const coreMessageSchema: z.ZodType<CoreMessage>;
1074
1239
  * @param options.error - The error that occurred while parsing the tool call.
1075
1240
  */
1076
1241
  type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
1077
- system: string | undefined;
1242
+ system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
1078
1243
  messages: ModelMessage[];
1079
1244
  toolCall: LanguageModelV3ToolCall;
1080
1245
  tools: TOOLS;
@@ -1097,13 +1262,21 @@ Callback that is set using the `onFinish` option.
1097
1262
  */
1098
1263
  type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
1099
1264
  /**
1100
- Details for all steps.
1101
- */
1265
+ * Details for all steps.
1266
+ */
1102
1267
  readonly steps: StepResult<TOOLS>[];
1103
1268
  /**
1104
- Total usage for all steps. This is the sum of the usage of all steps.
1269
+ * Total usage for all steps. This is the sum of the usage of all steps.
1105
1270
  */
1106
1271
  readonly totalUsage: LanguageModelUsage;
1272
+ /**
1273
+ * Context that is passed into tool execution.
1274
+ *
1275
+ * Experimental (can break in patch releases).
1276
+ *
1277
+ * @default undefined
1278
+ */
1279
+ experimental_context: unknown;
1107
1280
  }) => PromiseLike<void> | void;
1108
1281
  /**
1109
1282
  Generate a text and call tools for a given prompt using a language model.
@@ -1294,288 +1467,58 @@ declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _int
1294
1467
  }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
1295
1468
 
1296
1469
  /**
1297
- A transformation that is applied to the stream.
1298
-
1299
- @param stopStream - A function that stops the source stream.
1300
- @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
1470
+ * Tool output when the tool execution has been denied (for static tools).
1301
1471
  */
1302
- type StreamTextTransform<TOOLS extends ToolSet> = (options: {
1303
- tools: TOOLS;
1304
- stopStream: () => void;
1305
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
1472
+ type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
1473
+ [NAME in keyof TOOLS]: {
1474
+ type: 'tool-output-denied';
1475
+ toolCallId: string;
1476
+ toolName: NAME & string;
1477
+ providerExecuted?: boolean;
1478
+ dynamic?: false | undefined;
1479
+ };
1480
+ }>;
1306
1481
  /**
1307
- Callback that is set using the `onError` option.
1482
+ * Tool output when the tool execution has been denied.
1483
+ */
1484
+ type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
1308
1485
 
1309
- @param event - The event that is passed to the callback.
1486
+ /**
1487
+ The data types that can be used in the UI message for the UI message data parts.
1310
1488
  */
1311
- type StreamTextOnErrorCallback = (event: {
1312
- error: unknown;
1313
- }) => PromiseLike<void> | void;
1489
+ type UIDataTypes = Record<string, unknown>;
1490
+ type UITool = {
1491
+ input: unknown;
1492
+ output: unknown | undefined;
1493
+ };
1314
1494
  /**
1315
- Callback that is set using the `onStepFinish` option.
1316
-
1317
- @param stepResult - The result of the step.
1495
+ * Infer the input and output types of a tool so it can be used as a UI tool.
1318
1496
  */
1319
- type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
1497
+ type InferUITool<TOOL extends Tool> = {
1498
+ input: InferToolInput<TOOL>;
1499
+ output: InferToolOutput<TOOL>;
1500
+ };
1320
1501
  /**
1321
- Callback that is set using the `onChunk` option.
1322
-
1323
- @param event - The event that is passed to the callback.
1502
+ * Infer the input and output types of a tool set so it can be used as a UI tool set.
1324
1503
  */
1325
- type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
1326
- chunk: Extract<TextStreamPart<TOOLS>, {
1327
- type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
1328
- }>;
1329
- }) => PromiseLike<void> | void;
1504
+ type InferUITools<TOOLS extends ToolSet> = {
1505
+ [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
1506
+ };
1507
+ type UITools = Record<string, UITool>;
1330
1508
  /**
1331
- Callback that is set using the `onFinish` option.
1332
-
1333
- @param event - The event that is passed to the callback.
1509
+ AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
1334
1510
  */
1335
- type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
1511
+ interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
1336
1512
  /**
1337
- Details for all steps.
1338
- */
1339
- readonly steps: StepResult<TOOLS>[];
1513
+ A unique identifier for the message.
1514
+ */
1515
+ id: string;
1340
1516
  /**
1341
- Total usage for all steps. This is the sum of the usage of all steps.
1517
+ The role of the message.
1342
1518
  */
1343
- readonly totalUsage: LanguageModelUsage;
1344
- }) => PromiseLike<void> | void;
1345
- /**
1346
- Callback that is set using the `onAbort` option.
1347
-
1348
- @param event - The event that is passed to the callback.
1349
- */
1350
- type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
1519
+ role: 'system' | 'user' | 'assistant';
1351
1520
  /**
1352
- Details for all previously finished steps.
1353
- */
1354
- readonly steps: StepResult<TOOLS>[];
1355
- }) => PromiseLike<void> | void;
1356
- /**
1357
- Generate a text and call tools for a given prompt using a language model.
1358
-
1359
- This function streams the output. If you do not want to stream the output, use `generateText` instead.
1360
-
1361
- @param model - The language model to use.
1362
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
1363
-
1364
- @param system - A system message that will be part of the prompt.
1365
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
1366
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
1367
-
1368
- @param maxOutputTokens - Maximum number of tokens to generate.
1369
- @param temperature - Temperature setting.
1370
- The value is passed through to the provider. The range depends on the provider and model.
1371
- It is recommended to set either `temperature` or `topP`, but not both.
1372
- @param topP - Nucleus sampling.
1373
- The value is passed through to the provider. The range depends on the provider and model.
1374
- It is recommended to set either `temperature` or `topP`, but not both.
1375
- @param topK - Only sample from the top K options for each subsequent token.
1376
- Used to remove "long tail" low probability responses.
1377
- Recommended for advanced use cases only. You usually only need to use temperature.
1378
- @param presencePenalty - Presence penalty setting.
1379
- It affects the likelihood of the model to repeat information that is already in the prompt.
1380
- The value is passed through to the provider. The range depends on the provider and model.
1381
- @param frequencyPenalty - Frequency penalty setting.
1382
- It affects the likelihood of the model to repeatedly use the same words or phrases.
1383
- The value is passed through to the provider. The range depends on the provider and model.
1384
- @param stopSequences - Stop sequences.
1385
- If set, the model will stop generating text when one of the stop sequences is generated.
1386
- @param seed - The seed (integer) to use for random sampling.
1387
- If set and supported by the model, calls will generate deterministic results.
1388
-
1389
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
1390
- @param abortSignal - An optional abort signal that can be used to cancel the call.
1391
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
1392
-
1393
- @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
1394
- @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
1395
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
1396
- @param onFinish - Callback that is called when all steps are finished and the response is complete.
1397
-
1398
- @return
1399
- A result object for accessing different stream types and additional information.
1400
- */
1401
- declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
1402
- /**
1403
- The language model to use.
1404
- */
1405
- model: LanguageModel;
1406
- /**
1407
- The tools that the model can call. The model needs to support calling tools.
1408
- */
1409
- tools?: TOOLS;
1410
- /**
1411
- The tool choice strategy. Default: 'auto'.
1412
- */
1413
- toolChoice?: ToolChoice<TOOLS>;
1414
- /**
1415
- Condition for stopping the generation when there are tool results in the last step.
1416
- When the condition is an array, any of the conditions can be met to stop the generation.
1417
-
1418
- @default stepCountIs(1)
1419
- */
1420
- stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
1421
- /**
1422
- Optional telemetry configuration (experimental).
1423
- */
1424
- experimental_telemetry?: TelemetrySettings;
1425
- /**
1426
- Additional provider-specific options. They are passed through
1427
- to the provider from the AI SDK and enable provider-specific
1428
- functionality that can be fully encapsulated in the provider.
1429
- */
1430
- providerOptions?: ProviderOptions;
1431
- /**
1432
- * @deprecated Use `activeTools` instead.
1433
- */
1434
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
1435
- /**
1436
- Limits the tools that are available for the model to call without
1437
- changing the tool call and result types in the result.
1438
- */
1439
- activeTools?: Array<keyof NoInfer<TOOLS>>;
1440
- /**
1441
- Optional specification for parsing structured outputs from the LLM response.
1442
- */
1443
- output?: OUTPUT;
1444
- /**
1445
- Optional specification for parsing structured outputs from the LLM response.
1446
-
1447
- @deprecated Use `output` instead.
1448
- */
1449
- experimental_output?: OUTPUT;
1450
- /**
1451
- Optional function that you can use to provide different settings for a step.
1452
-
1453
- @param options - The options for the step.
1454
- @param options.steps - The steps that have been executed so far.
1455
- @param options.stepNumber - The number of the step that is being executed.
1456
- @param options.model - The model that is being used.
1457
-
1458
- @returns An object that contains the settings for the step.
1459
- If you return undefined (or for undefined settings), the settings from the outer level will be used.
1460
- */
1461
- prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
1462
- /**
1463
- A function that attempts to repair a tool call that failed to parse.
1464
- */
1465
- experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
1466
- /**
1467
- Optional stream transformations.
1468
- They are applied in the order they are provided.
1469
- The stream transformations must maintain the stream structure for streamText to work correctly.
1470
- */
1471
- experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
1472
- /**
1473
- Custom download function to use for URLs.
1474
-
1475
- By default, files are downloaded if the model does not support the URL for the given media type.
1476
- */
1477
- experimental_download?: DownloadFunction | undefined;
1478
- /**
1479
- Whether to include raw chunks from the provider in the stream.
1480
- When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
1481
- This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
1482
- Defaults to false.
1483
- */
1484
- includeRawChunks?: boolean;
1485
- /**
1486
- Callback that is called for each chunk of the stream.
1487
- The stream processing will pause until the callback promise is resolved.
1488
- */
1489
- onChunk?: StreamTextOnChunkCallback<TOOLS>;
1490
- /**
1491
- Callback that is invoked when an error occurs during streaming.
1492
- You can use it to log errors.
1493
- The stream processing will pause until the callback promise is resolved.
1494
- */
1495
- onError?: StreamTextOnErrorCallback;
1496
- /**
1497
- Callback that is called when the LLM response and all request tool executions
1498
- (for tools that have an `execute` function) are finished.
1499
-
1500
- The usage is the combined usage of all steps.
1501
- */
1502
- onFinish?: StreamTextOnFinishCallback<TOOLS>;
1503
- onAbort?: StreamTextOnAbortCallback<TOOLS>;
1504
- /**
1505
- Callback that is called when each step (LLM call) is finished, including intermediate steps.
1506
- */
1507
- onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
1508
- /**
1509
- * Context that is passed into tool execution.
1510
- *
1511
- * Experimental (can break in patch releases).
1512
- *
1513
- * @default undefined
1514
- */
1515
- experimental_context?: unknown;
1516
- /**
1517
- Internal. For test use only. May change without notice.
1518
- */
1519
- _internal?: {
1520
- now?: () => number;
1521
- generateId?: IdGenerator;
1522
- currentDate?: () => Date;
1523
- };
1524
- }): StreamTextResult<TOOLS, OUTPUT>;
1525
-
1526
- /**
1527
- * Tool output when the tool execution has been denied (for static tools).
1528
- */
1529
- type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
1530
- [NAME in keyof TOOLS]: {
1531
- type: 'tool-output-denied';
1532
- toolCallId: string;
1533
- toolName: NAME & string;
1534
- providerExecuted?: boolean;
1535
- dynamic?: false | undefined;
1536
- };
1537
- }>;
1538
- /**
1539
- * Tool output when the tool execution has been denied.
1540
- */
1541
- type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
1542
-
1543
- /**
1544
- The data types that can be used in the UI message for the UI message data parts.
1545
- */
1546
- type UIDataTypes = Record<string, unknown>;
1547
- type UITool = {
1548
- input: unknown;
1549
- output: unknown | undefined;
1550
- };
1551
- /**
1552
- * Infer the input and output types of a tool so it can be used as a UI tool.
1553
- */
1554
- type InferUITool<TOOL extends Tool> = {
1555
- input: InferToolInput<TOOL>;
1556
- output: InferToolOutput<TOOL>;
1557
- };
1558
- /**
1559
- * Infer the input and output types of a tool set so it can be used as a UI tool set.
1560
- */
1561
- type InferUITools<TOOLS extends ToolSet> = {
1562
- [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
1563
- };
1564
- type UITools = Record<string, UITool>;
1565
- /**
1566
- AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
1567
- */
1568
- interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
1569
- /**
1570
- A unique identifier for the message.
1571
- */
1572
- id: string;
1573
- /**
1574
- The role of the message.
1575
- */
1576
- role: 'system' | 'user' | 'assistant';
1577
- /**
1578
- The metadata of the message.
1521
+ The metadata of the message.
1579
1522
  */
1580
1523
  metadata?: METADATA;
1581
1524
  /**
@@ -1879,10 +1822,41 @@ declare function isFileUIPart(part: UIMessagePart<UIDataTypes, UITools>): part i
1879
1822
  * Type guard to check if a message part is a reasoning part.
1880
1823
  */
1881
1824
  declare function isReasoningUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is ReasoningUIPart;
1882
- declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
1883
- declare function isToolOrDynamicToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
1884
- declare function getToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
1885
- declare function getToolOrDynamicToolName(part: ToolUIPart<UITools> | DynamicToolUIPart): string;
1825
+ /**
1826
+ * Check if a message part is a static tool part.
1827
+ *
1828
+ * Static tools are tools for which the types are known at development time.
1829
+ */
1830
+ declare function isStaticToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
1831
+ /**
1832
+ * Check if a message part is a tool part.
1833
+ *
1834
+ * Tool parts are either static or dynamic tools.
1835
+ *
1836
+ * Use `isStaticToolUIPart` or `isDynamicToolUIPart` to check the type of the tool.
1837
+ */
1838
+ declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
1839
+ /**
1840
+ * @deprecated Use isToolUIPart instead.
1841
+ */
1842
+ declare const isToolOrDynamicToolUIPart: typeof isToolUIPart;
1843
+ /**
1844
+ * Returns the name of the static tool.
1845
+ *
1846
+ * The possible values are the keys of the tool set.
1847
+ */
1848
+ declare function getStaticToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
1849
+ /**
1850
+ * Returns the name of the tool (static or dynamic).
1851
+ *
1852
+ * This function will not restrict the name to the keys of the tool set.
1853
+ * If you need to restrict the name to the keys of the tool set, use `getStaticToolName` instead.
1854
+ */
1855
+ declare function getToolName(part: ToolUIPart<UITools> | DynamicToolUIPart): string;
1856
+ /**
1857
+ * @deprecated Use getToolName instead.
1858
+ */
1859
+ declare const getToolOrDynamicToolName: typeof getToolName;
1886
1860
  type InferUIMessageMetadata<T extends UIMessage> = T extends UIMessage<infer METADATA> ? METADATA : unknown;
1887
1861
  type InferUIMessageData<T extends UIMessage> = T extends UIMessage<unknown, infer DATA_TYPES> ? DATA_TYPES : UIDataTypes;
1888
1862
  type InferUIMessageTools<T extends UIMessage> = T extends UIMessage<unknown, UIDataTypes, infer TOOLS> ? TOOLS : UITools;
@@ -2008,7 +1982,7 @@ declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
2008
1982
  messageMetadata?: unknown;
2009
1983
  } | {
2010
1984
  type: "finish";
2011
- finishReason?: "unknown" | "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
1985
+ finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
2012
1986
  messageMetadata?: unknown;
2013
1987
  } | {
2014
1988
  type: "abort";
@@ -2245,98 +2219,104 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2245
2219
 
2246
2220
  Automatically consumes the stream.
2247
2221
  */
2248
- readonly content: Promise<Array<ContentPart<TOOLS>>>;
2222
+ readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
2249
2223
  /**
2250
2224
  The full text that has been generated by the last step.
2251
2225
 
2252
2226
  Automatically consumes the stream.
2253
2227
  */
2254
- readonly text: Promise<string>;
2228
+ readonly text: PromiseLike<string>;
2255
2229
  /**
2256
2230
  The full reasoning that the model has generated.
2257
2231
 
2258
2232
  Automatically consumes the stream.
2259
2233
  */
2260
- readonly reasoning: Promise<Array<ReasoningOutput>>;
2234
+ readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
2261
2235
  /**
2262
2236
  The reasoning that has been generated by the last step.
2263
2237
 
2264
2238
  Automatically consumes the stream.
2265
2239
  */
2266
- readonly reasoningText: Promise<string | undefined>;
2240
+ readonly reasoningText: PromiseLike<string | undefined>;
2267
2241
  /**
2268
2242
  Files that have been generated by the model in the last step.
2269
2243
 
2270
2244
  Automatically consumes the stream.
2271
2245
  */
2272
- readonly files: Promise<GeneratedFile[]>;
2246
+ readonly files: PromiseLike<GeneratedFile[]>;
2273
2247
  /**
2274
2248
  Sources that have been used as references in the last step.
2275
2249
 
2276
2250
  Automatically consumes the stream.
2277
2251
  */
2278
- readonly sources: Promise<Source[]>;
2252
+ readonly sources: PromiseLike<Source[]>;
2279
2253
  /**
2280
2254
  The tool calls that have been executed in the last step.
2281
2255
 
2282
2256
  Automatically consumes the stream.
2283
2257
  */
2284
- readonly toolCalls: Promise<TypedToolCall<TOOLS>[]>;
2258
+ readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
2285
2259
  /**
2286
2260
  The static tool calls that have been executed in the last step.
2287
2261
 
2288
2262
  Automatically consumes the stream.
2289
2263
  */
2290
- readonly staticToolCalls: Promise<StaticToolCall<TOOLS>[]>;
2264
+ readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
2291
2265
  /**
2292
2266
  The dynamic tool calls that have been executed in the last step.
2293
2267
 
2294
2268
  Automatically consumes the stream.
2295
2269
  */
2296
- readonly dynamicToolCalls: Promise<DynamicToolCall[]>;
2270
+ readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
2297
2271
  /**
2298
2272
  The static tool results that have been generated in the last step.
2299
2273
 
2300
2274
  Automatically consumes the stream.
2301
2275
  */
2302
- readonly staticToolResults: Promise<StaticToolResult<TOOLS>[]>;
2276
+ readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
2303
2277
  /**
2304
2278
  The dynamic tool results that have been generated in the last step.
2305
2279
 
2306
2280
  Automatically consumes the stream.
2307
2281
  */
2308
- readonly dynamicToolResults: Promise<DynamicToolResult[]>;
2282
+ readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
2309
2283
  /**
2310
2284
  The tool results that have been generated in the last step.
2311
2285
 
2312
2286
  Automatically consumes the stream.
2313
2287
  */
2314
- readonly toolResults: Promise<TypedToolResult<TOOLS>[]>;
2288
+ readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
2315
2289
  /**
2316
- The reason why the generation finished. Taken from the last step.
2317
-
2318
- Automatically consumes the stream.
2319
- */
2320
- readonly finishReason: Promise<FinishReason>;
2290
+ * The unified finish reason why the generation finished. Taken from the last step.
2291
+ *
2292
+ * Automatically consumes the stream.
2293
+ */
2294
+ readonly finishReason: PromiseLike<FinishReason>;
2295
+ /**
2296
+ * The raw reason why the generation finished (from the provider). Taken from the last step.
2297
+ *
2298
+ * Automatically consumes the stream.
2299
+ */
2300
+ readonly rawFinishReason: PromiseLike<string | undefined>;
2321
2301
  /**
2322
2302
  The token usage of the last step.
2323
2303
 
2324
2304
  Automatically consumes the stream.
2325
2305
  */
2326
- readonly usage: Promise<LanguageModelUsage>;
2306
+ readonly usage: PromiseLike<LanguageModelUsage>;
2327
2307
  /**
2328
2308
  The total token usage of the generated response.
2329
2309
  When there are multiple steps, the usage is the sum of all step usages.
2330
2310
 
2331
2311
  Automatically consumes the stream.
2332
2312
  */
2333
- readonly totalUsage: Promise<LanguageModelUsage>;
2313
+ readonly totalUsage: PromiseLike<LanguageModelUsage>;
2334
2314
  /**
2335
2315
  Warnings from the model provider (e.g. unsupported settings) for the first step.
2336
2316
 
2337
2317
  Automatically consumes the stream.
2338
2318
  */
2339
- readonly warnings: Promise<CallWarning[] | undefined>;
2319
+ readonly warnings: PromiseLike<CallWarning[] | undefined>;
2340
2320
  /**
2341
2321
  Details for all steps.
2342
2322
  You can use this to get information about intermediate steps,
@@ -2344,19 +2324,19 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2344
2324
 
2345
2325
  Automatically consumes the stream.
2346
2326
  */
2347
- readonly steps: Promise<Array<StepResult<TOOLS>>>;
2327
+ readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
2348
2328
  /**
2349
2329
  Additional request information from the last step.
2350
2330
 
2351
2331
  Automatically consumes the stream.
2352
2332
  */
2353
- readonly request: Promise<LanguageModelRequestMetadata>;
2333
+ readonly request: PromiseLike<LanguageModelRequestMetadata>;
2354
2334
  /**
2355
2335
  Additional response information from the last step.
2356
2336
 
2357
2337
  Automatically consumes the stream.
2358
2338
  */
2359
- readonly response: Promise<LanguageModelResponseMetadata & {
2339
+ readonly response: PromiseLike<LanguageModelResponseMetadata & {
2360
2340
  /**
2361
2341
  The response messages that were generated during the call. It consists of an assistant message,
2362
2342
  potentially containing tool calls.
@@ -2372,7 +2352,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2372
2352
  Metadata is passed through from the provider to the AI SDK and
2373
2353
  enables provider-specific results that can be fully encapsulated in the provider.
2374
2354
  */
2375
- readonly providerMetadata: Promise<ProviderMetadata | undefined>;
2355
+ readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
2376
2356
  /**
2377
2357
  A text stream that returns only the generated text deltas. You can use it
2378
2358
  as either an AsyncIterable or a ReadableStream. When an error occurs, the
@@ -2399,7 +2379,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2399
2379
  /**
2400
2380
  * The complete parsed output. It uses the `output` specification.
2401
2381
  */
2402
- readonly output: Promise<InferCompleteOutput<OUTPUT>>;
2382
+ readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
2403
2383
  /**
2404
2384
  Consumes the stream without processing the parts.
2405
2385
  This is useful to force the stream to finish.
@@ -2408,7 +2388,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2408
2388
 
2409
2389
  If an error occurs, it is passed to the optional `onError` callback.
2410
2390
  */
2411
- consumeStream(options?: ConsumeStreamOptions): Promise<void>;
2391
+ consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
2412
2392
  /**
2413
2393
  Converts the result to a UI message stream.
2414
2394
 
@@ -2418,112 +2398,355 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2418
2398
  /**
2419
2399
  *Writes UI message stream output to a Node.js response-like object.
2420
2400
  */
2421
- pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
2401
+ pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
2402
+ /**
2403
+ Writes text delta output to a Node.js response-like object.
2404
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2405
+ writes each text delta as a separate chunk.
2406
+
2407
+ @param response A Node.js response-like object (ServerResponse).
2408
+ @param init Optional headers, status code, and status text.
2409
+ */
2410
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
2411
+ /**
2412
+ Converts the result to a streamed response object with a stream data part stream.
2413
+
2414
+ @return A response object.
2415
+ */
2416
+ toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
2417
+ /**
2418
+ Creates a simple text stream response.
2419
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
2420
+ Non-text-delta events are ignored.
2421
+ @param init Optional headers, status code, and status text.
2422
+ */
2423
+ toTextStreamResponse(init?: ResponseInit): Response;
2424
+ }
2425
+ type TextStreamPart<TOOLS extends ToolSet> = {
2426
+ type: 'text-start';
2427
+ id: string;
2428
+ providerMetadata?: ProviderMetadata;
2429
+ } | {
2430
+ type: 'text-end';
2431
+ id: string;
2432
+ providerMetadata?: ProviderMetadata;
2433
+ } | {
2434
+ type: 'text-delta';
2435
+ id: string;
2436
+ providerMetadata?: ProviderMetadata;
2437
+ text: string;
2438
+ } | {
2439
+ type: 'reasoning-start';
2440
+ id: string;
2441
+ providerMetadata?: ProviderMetadata;
2442
+ } | {
2443
+ type: 'reasoning-end';
2444
+ id: string;
2445
+ providerMetadata?: ProviderMetadata;
2446
+ } | {
2447
+ type: 'reasoning-delta';
2448
+ providerMetadata?: ProviderMetadata;
2449
+ id: string;
2450
+ text: string;
2451
+ } | {
2452
+ type: 'tool-input-start';
2453
+ id: string;
2454
+ toolName: string;
2455
+ providerMetadata?: ProviderMetadata;
2456
+ providerExecuted?: boolean;
2457
+ dynamic?: boolean;
2458
+ title?: string;
2459
+ } | {
2460
+ type: 'tool-input-end';
2461
+ id: string;
2462
+ providerMetadata?: ProviderMetadata;
2463
+ } | {
2464
+ type: 'tool-input-delta';
2465
+ id: string;
2466
+ delta: string;
2467
+ providerMetadata?: ProviderMetadata;
2468
+ } | ({
2469
+ type: 'source';
2470
+ } & Source) | {
2471
+ type: 'file';
2472
+ file: GeneratedFile;
2473
+ } | ({
2474
+ type: 'tool-call';
2475
+ } & TypedToolCall<TOOLS>) | ({
2476
+ type: 'tool-result';
2477
+ } & TypedToolResult<TOOLS>) | ({
2478
+ type: 'tool-error';
2479
+ } & TypedToolError<TOOLS>) | ({
2480
+ type: 'tool-output-denied';
2481
+ } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
2482
+ type: 'start-step';
2483
+ request: LanguageModelRequestMetadata;
2484
+ warnings: CallWarning[];
2485
+ } | {
2486
+ type: 'finish-step';
2487
+ response: LanguageModelResponseMetadata;
2488
+ usage: LanguageModelUsage;
2489
+ finishReason: FinishReason;
2490
+ rawFinishReason: string | undefined;
2491
+ providerMetadata: ProviderMetadata | undefined;
2492
+ } | {
2493
+ type: 'start';
2494
+ } | {
2495
+ type: 'finish';
2496
+ finishReason: FinishReason;
2497
+ rawFinishReason: string | undefined;
2498
+ totalUsage: LanguageModelUsage;
2499
+ } | {
2500
+ type: 'abort';
2501
+ } | {
2502
+ type: 'error';
2503
+ error: unknown;
2504
+ } | {
2505
+ type: 'raw';
2506
+ rawValue: unknown;
2507
+ };
2508
+
2509
+ /**
2510
+ A transformation that is applied to the stream.
2511
+
2512
+ @param stopStream - A function that stops the source stream.
2513
+ @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
2514
+ */
2515
+ type StreamTextTransform<TOOLS extends ToolSet> = (options: {
2516
+ tools: TOOLS;
2517
+ stopStream: () => void;
2518
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
2519
+ /**
2520
+ Callback that is set using the `onError` option.
2521
+
2522
+ @param event - The event that is passed to the callback.
2523
+ */
2524
+ type StreamTextOnErrorCallback = (event: {
2525
+ error: unknown;
2526
+ }) => PromiseLike<void> | void;
2527
+ /**
2528
+ Callback that is set using the `onStepFinish` option.
2529
+
2530
+ @param stepResult - The result of the step.
2531
+ */
2532
+ type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
2533
+ /**
2534
+ Callback that is set using the `onChunk` option.
2535
+
2536
+ @param event - The event that is passed to the callback.
2537
+ */
2538
+ type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2539
+ chunk: Extract<TextStreamPart<TOOLS>, {
2540
+ type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
2541
+ }>;
2542
+ }) => PromiseLike<void> | void;
2543
+ /**
2544
+ Callback that is set using the `onFinish` option.
2545
+
2546
+ @param event - The event that is passed to the callback.
2547
+ */
2548
+ type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
2549
+ /**
2550
+ * Details for all steps.
2551
+ */
2552
+ readonly steps: StepResult<TOOLS>[];
2553
+ /**
2554
+ * Total usage for all steps. This is the sum of the usage of all steps.
2555
+ */
2556
+ readonly totalUsage: LanguageModelUsage;
2557
+ /**
2558
+ * Context that is passed into tool execution.
2559
+ *
2560
+ * Experimental (can break in patch releases).
2561
+ *
2562
+ * @default undefined
2563
+ */
2564
+ experimental_context: unknown;
2565
+ }) => PromiseLike<void> | void;
2566
+ /**
2567
+ Callback that is set using the `onAbort` option.
2568
+
2569
+ @param event - The event that is passed to the callback.
2570
+ */
2571
+ type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
2572
+ /**
2573
+ Details for all previously finished steps.
2574
+ */
2575
+ readonly steps: StepResult<TOOLS>[];
2576
+ }) => PromiseLike<void> | void;
2577
+ /**
2578
+ Generate a text and call tools for a given prompt using a language model.
2579
+
2580
+ This function streams the output. If you do not want to stream the output, use `generateText` instead.
2581
+
2582
+ @param model - The language model to use.
2583
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2584
+
2585
+ @param system - A system message that will be part of the prompt.
2586
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2587
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2588
+
2589
+ @param maxOutputTokens - Maximum number of tokens to generate.
2590
+ @param temperature - Temperature setting.
2591
+ The value is passed through to the provider. The range depends on the provider and model.
2592
+ It is recommended to set either `temperature` or `topP`, but not both.
2593
+ @param topP - Nucleus sampling.
2594
+ The value is passed through to the provider. The range depends on the provider and model.
2595
+ It is recommended to set either `temperature` or `topP`, but not both.
2596
+ @param topK - Only sample from the top K options for each subsequent token.
2597
+ Used to remove "long tail" low probability responses.
2598
+ Recommended for advanced use cases only. You usually only need to use temperature.
2599
+ @param presencePenalty - Presence penalty setting.
2600
+ It affects the likelihood of the model to repeat information that is already in the prompt.
2601
+ The value is passed through to the provider. The range depends on the provider and model.
2602
+ @param frequencyPenalty - Frequency penalty setting.
2603
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
2604
+ The value is passed through to the provider. The range depends on the provider and model.
2605
+ @param stopSequences - Stop sequences.
2606
+ If set, the model will stop generating text when one of the stop sequences is generated.
2607
+ @param seed - The seed (integer) to use for random sampling.
2608
+ If set and supported by the model, calls will generate deterministic results.
2609
+
2610
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2611
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2612
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2613
+
2614
+ @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2615
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
2616
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2617
+ @param onFinish - Callback that is called when all steps are finished and the response is complete.
2618
+
2619
+ @return
2620
+ A result object for accessing different stream types and additional information.
2621
+ */
2622
+ declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
2623
+ /**
2624
+ The language model to use.
2625
+ */
2626
+ model: LanguageModel;
2627
+ /**
2628
+ The tools that the model can call. The model needs to support calling tools.
2629
+ */
2630
+ tools?: TOOLS;
2631
+ /**
2632
+ The tool choice strategy. Default: 'auto'.
2633
+ */
2634
+ toolChoice?: ToolChoice<TOOLS>;
2635
+ /**
2636
+ Condition for stopping the generation when there are tool results in the last step.
2637
+ When the condition is an array, any of the conditions can be met to stop the generation.
2638
+
2639
+ @default stepCountIs(1)
2640
+ */
2641
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
2642
+ /**
2643
+ Optional telemetry configuration (experimental).
2644
+ */
2645
+ experimental_telemetry?: TelemetrySettings;
2646
+ /**
2647
+ Additional provider-specific options. They are passed through
2648
+ to the provider from the AI SDK and enable provider-specific
2649
+ functionality that can be fully encapsulated in the provider.
2650
+ */
2651
+ providerOptions?: ProviderOptions;
2652
+ /**
2653
+ * @deprecated Use `activeTools` instead.
2654
+ */
2655
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2656
+ /**
2657
+ Limits the tools that are available for the model to call without
2658
+ changing the tool call and result types in the result.
2659
+ */
2660
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
2661
+ /**
2662
+ Optional specification for parsing structured outputs from the LLM response.
2663
+ */
2664
+ output?: OUTPUT;
2665
+ /**
2666
+ Optional specification for parsing structured outputs from the LLM response.
2667
+
2668
+ @deprecated Use `output` instead.
2669
+ */
2670
+ experimental_output?: OUTPUT;
2671
+ /**
2672
+ Optional function that you can use to provide different settings for a step.
2673
+
2674
+ @param options - The options for the step.
2675
+ @param options.steps - The steps that have been executed so far.
2676
+ @param options.stepNumber - The number of the step that is being executed.
2677
+ @param options.model - The model that is being used.
2678
+
2679
+ @returns An object that contains the settings for the step.
2680
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
2681
+ */
2682
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2683
+ /**
2684
+ A function that attempts to repair a tool call that failed to parse.
2685
+ */
2686
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2687
+ /**
2688
+ Optional stream transformations.
2689
+ They are applied in the order they are provided.
2690
+ The stream transformations must maintain the stream structure for streamText to work correctly.
2691
+ */
2692
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2693
+ /**
2694
+ Custom download function to use for URLs.
2695
+
2696
+ By default, files are downloaded if the model does not support the URL for the given media type.
2697
+ */
2698
+ experimental_download?: DownloadFunction | undefined;
2699
+ /**
2700
+ Whether to include raw chunks from the provider in the stream.
2701
+ When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
2702
+ This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
2703
+ Defaults to false.
2704
+ */
2705
+ includeRawChunks?: boolean;
2706
+ /**
2707
+ Callback that is called for each chunk of the stream.
2708
+ The stream processing will pause until the callback promise is resolved.
2709
+ */
2710
+ onChunk?: StreamTextOnChunkCallback<TOOLS>;
2711
+ /**
2712
+ Callback that is invoked when an error occurs during streaming.
2713
+ You can use it to log errors.
2714
+ The stream processing will pause until the callback promise is resolved.
2715
+ */
2716
+ onError?: StreamTextOnErrorCallback;
2717
+ /**
2718
+ Callback that is called when the LLM response and all request tool executions
2719
+ (for tools that have an `execute` function) are finished.
2720
+
2721
+ The usage is the combined usage of all steps.
2722
+ */
2723
+ onFinish?: StreamTextOnFinishCallback<TOOLS>;
2724
+ onAbort?: StreamTextOnAbortCallback<TOOLS>;
2422
2725
  /**
2423
- Writes text delta output to a Node.js response-like object.
2424
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2425
- writes each text delta as a separate chunk.
2426
-
2427
- @param response A Node.js response-like object (ServerResponse).
2428
- @param init Optional headers, status code, and status text.
2429
- */
2430
- pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
2726
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
2727
+ */
2728
+ onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
2431
2729
  /**
2432
- Converts the result to a streamed response object with a stream data part stream.
2433
-
2434
- @return A response object.
2435
- */
2436
- toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
2730
+ * Context that is passed into tool execution.
2731
+ *
2732
+ * Experimental (can break in patch releases).
2733
+ *
2734
+ * @default undefined
2735
+ */
2736
+ experimental_context?: unknown;
2437
2737
  /**
2438
- Creates a simple text stream response.
2439
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
2440
- Non-text-delta events are ignored.
2441
- @param init Optional headers, status code, and status text.
2442
- */
2443
- toTextStreamResponse(init?: ResponseInit): Response;
2444
- }
2445
- type TextStreamPart<TOOLS extends ToolSet> = {
2446
- type: 'text-start';
2447
- id: string;
2448
- providerMetadata?: ProviderMetadata;
2449
- } | {
2450
- type: 'text-end';
2451
- id: string;
2452
- providerMetadata?: ProviderMetadata;
2453
- } | {
2454
- type: 'text-delta';
2455
- id: string;
2456
- providerMetadata?: ProviderMetadata;
2457
- text: string;
2458
- } | {
2459
- type: 'reasoning-start';
2460
- id: string;
2461
- providerMetadata?: ProviderMetadata;
2462
- } | {
2463
- type: 'reasoning-end';
2464
- id: string;
2465
- providerMetadata?: ProviderMetadata;
2466
- } | {
2467
- type: 'reasoning-delta';
2468
- providerMetadata?: ProviderMetadata;
2469
- id: string;
2470
- text: string;
2471
- } | {
2472
- type: 'tool-input-start';
2473
- id: string;
2474
- toolName: string;
2475
- providerMetadata?: ProviderMetadata;
2476
- providerExecuted?: boolean;
2477
- dynamic?: boolean;
2478
- title?: string;
2479
- } | {
2480
- type: 'tool-input-end';
2481
- id: string;
2482
- providerMetadata?: ProviderMetadata;
2483
- } | {
2484
- type: 'tool-input-delta';
2485
- id: string;
2486
- delta: string;
2487
- providerMetadata?: ProviderMetadata;
2488
- } | ({
2489
- type: 'source';
2490
- } & Source) | {
2491
- type: 'file';
2492
- file: GeneratedFile;
2493
- } | ({
2494
- type: 'tool-call';
2495
- } & TypedToolCall<TOOLS>) | ({
2496
- type: 'tool-result';
2497
- } & TypedToolResult<TOOLS>) | ({
2498
- type: 'tool-error';
2499
- } & TypedToolError<TOOLS>) | ({
2500
- type: 'tool-output-denied';
2501
- } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
2502
- type: 'start-step';
2503
- request: LanguageModelRequestMetadata;
2504
- warnings: CallWarning[];
2505
- } | {
2506
- type: 'finish-step';
2507
- response: LanguageModelResponseMetadata;
2508
- usage: LanguageModelUsage;
2509
- finishReason: FinishReason;
2510
- providerMetadata: ProviderMetadata | undefined;
2511
- } | {
2512
- type: 'start';
2513
- } | {
2514
- type: 'finish';
2515
- finishReason: FinishReason;
2516
- totalUsage: LanguageModelUsage;
2517
- } | {
2518
- type: 'abort';
2519
- } | {
2520
- type: 'error';
2521
- error: unknown;
2522
- } | {
2523
- type: 'raw';
2524
- rawValue: unknown;
2525
- };
2738
+ Internal. For test use only. May change without notice.
2739
+ */
2740
+ _internal?: {
2741
+ now?: () => number;
2742
+ generateId?: IdGenerator;
2743
+ currentDate?: () => Date;
2744
+ };
2745
+ }): StreamTextResult<TOOLS, OUTPUT>;
2526
2746
 
2747
+ /**
2748
+ * Parameters for calling an agent.
2749
+ */
2527
2750
  type AgentCallParameters<CALL_OPTIONS> = ([CALL_OPTIONS] extends [never] ? {
2528
2751
  options?: never;
2529
2752
  } : {
@@ -2554,7 +2777,23 @@ type AgentCallParameters<CALL_OPTIONS> = ([CALL_OPTIONS] extends [never] ? {
2554
2777
  * You can either use `prompt` or `messages` but not both.
2555
2778
  */
2556
2779
  prompt?: never;
2557
- });
2780
+ }) & {
2781
+ /**
2782
+ * Abort signal.
2783
+ */
2784
+ abortSignal?: AbortSignal;
2785
+ };
2786
+ /**
2787
+ * Parameters for streaming an output from an agent.
2788
+ */
2789
+ type AgentStreamParameters<CALL_OPTIONS, TOOLS extends ToolSet> = AgentCallParameters<CALL_OPTIONS> & {
2790
+ /**
2791
+ * Optional stream transformations.
2792
+ * They are applied in the order they are provided.
2793
+ * The stream transformations must maintain the stream structure for streamText to work correctly.
2794
+ */
2795
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2796
+ };
2558
2797
  /**
2559
2798
  * An Agent receives a prompt (text or messages) and generates or streams an output
2560
2799
  * that consists of steps, tool calls, data parts, etc.
@@ -2583,7 +2822,7 @@ interface Agent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends
2583
2822
  /**
2584
2823
  * Streams an output from the agent (streaming).
2585
2824
  */
2586
- stream(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<StreamTextResult<TOOLS, OUTPUT>>;
2825
+ stream(options: AgentStreamParameters<CALL_OPTIONS, TOOLS>): PromiseLike<StreamTextResult<TOOLS, OUTPUT>>;
2587
2826
  }
2588
2827
 
2589
2828
  /**
@@ -2593,13 +2832,21 @@ Callback that is set using the `onFinish` option.
2593
2832
  */
2594
2833
  type ToolLoopAgentOnFinishCallback<TOOLS extends ToolSet = {}> = (event: StepResult<TOOLS> & {
2595
2834
  /**
2596
- Details for all steps.
2597
- */
2835
+ * Details for all steps.
2836
+ */
2598
2837
  readonly steps: StepResult<TOOLS>[];
2599
2838
  /**
2600
- Total usage for all steps. This is the sum of the usage of all steps.
2839
+ * Total usage for all steps. This is the sum of the usage of all steps.
2601
2840
  */
2602
2841
  readonly totalUsage: LanguageModelUsage;
2842
+ /**
2843
+ * Context that is passed into tool calls.
2844
+ *
2845
+ * Experimental (can break in patch releases).
2846
+ *
2847
+ * @default undefined
2848
+ */
2849
+ experimental_context?: unknown;
2603
2850
  }) => PromiseLike<void> | void;
2604
2851
 
2605
2852
  /**
@@ -2612,15 +2859,17 @@ type ToolLoopAgentOnStepFinishCallback<TOOLS extends ToolSet = {}> = (stepResult
2612
2859
  /**
2613
2860
  * Configuration options for an agent.
2614
2861
  */
2615
- type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = CallSettings & {
2862
+ type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = Omit<CallSettings, 'abortSignal'> & {
2616
2863
  /**
2617
2864
  * The id of the agent.
2618
2865
  */
2619
2866
  id?: string;
2620
2867
  /**
2621
2868
  * The instructions for the agent.
2869
+ *
2870
+ * It can be a string, or, if you need to pass additional provider options (e.g. for caching), a `SystemModelMessage`.
2622
2871
  */
2623
- instructions?: string;
2872
+ instructions?: string | SystemModelMessage | Array<SystemModelMessage>;
2624
2873
  /**
2625
2874
  The language model to use.
2626
2875
  */
@@ -2684,6 +2933,12 @@ type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUT
2684
2933
  */
2685
2934
  experimental_context?: unknown;
2686
2935
  /**
2936
+ Custom download function to use for URLs.
2937
+
2938
+ By default, files are downloaded if the model does not support the URL for the given media type.
2939
+ */
2940
+ experimental_download?: DownloadFunction | undefined;
2941
+ /**
2687
2942
  * The schema for the call options.
2688
2943
  */
2689
2944
  callOptionsSchema?: FlexibleSchema<CALL_OPTIONS>;
@@ -2692,7 +2947,7 @@ type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUT
2692
2947
  *
2693
2948
  * You can use this to have templates based on call options.
2694
2949
  */
2695
- prepareCall?: (options: AgentCallParameters<CALL_OPTIONS> & Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'>) => MaybePromiseLike<Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'> & Omit<Prompt, 'system'>>;
2950
+ prepareCall?: (options: AgentCallParameters<CALL_OPTIONS> & Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context' | 'experimental_download'>) => MaybePromiseLike<Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context' | 'experimental_download'> & Omit<Prompt, 'system'>>;
2696
2951
  };
2697
2952
 
2698
2953
  /**
@@ -2722,11 +2977,11 @@ declare class ToolLoopAgent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OU
2722
2977
  /**
2723
2978
  * Generates an output from the agent (non-streaming).
2724
2979
  */
2725
- generate(options: AgentCallParameters<CALL_OPTIONS>): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2980
+ generate({ abortSignal, ...options }: AgentCallParameters<CALL_OPTIONS>): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2726
2981
  /**
2727
2982
  * Streams an output from the agent (streaming).
2728
2983
  */
2729
- stream(options: AgentCallParameters<CALL_OPTIONS>): Promise<StreamTextResult<TOOLS, OUTPUT>>;
2984
+ stream({ abortSignal, experimental_transform, ...options }: AgentStreamParameters<CALL_OPTIONS, TOOLS>): Promise<StreamTextResult<TOOLS, OUTPUT>>;
2730
2985
  }
2731
2986
 
2732
2987
  /**
@@ -2737,20 +2992,22 @@ type InferAgentTools<AGENT> = AGENT extends Agent<any, infer TOOLS, any> ? TOOLS
2737
2992
  /**
2738
2993
  * Infer the UI message type of an agent.
2739
2994
  */
2740
- type InferAgentUIMessage<AGENT> = UIMessage<never, never, InferUITools<InferAgentTools<AGENT>>>;
2995
+ type InferAgentUIMessage<AGENT, MESSAGE_METADATA = unknown> = UIMessage<MESSAGE_METADATA, never, InferUITools<InferAgentTools<AGENT>>>;
2741
2996
 
2742
2997
  /**
2743
2998
  * Runs the agent and returns a response object with a UI message stream.
2744
2999
  *
2745
3000
  * @param agent - The agent to run.
2746
- * @param messages - The input UI messages.
3001
+ * @param uiMessages - The input UI messages.
2747
3002
  *
2748
3003
  * @returns The response object.
2749
3004
  */
2750
3005
  declare function createAgentUIStreamResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ headers, status, statusText, consumeSseStream, ...options }: {
2751
3006
  agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
2752
- messages: unknown[];
3007
+ uiMessages: unknown[];
3008
+ abortSignal?: AbortSignal;
2753
3009
  options?: CALL_OPTIONS;
3010
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2754
3011
  } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<Response>;
2755
3012
 
2756
3013
  declare const getOriginalFetch: () => typeof fetch;
@@ -3083,11 +3340,7 @@ declare function convertToModelMessages<UI_MESSAGE extends UIMessage>(messages:
3083
3340
  tools?: ToolSet;
3084
3341
  ignoreIncompleteToolCalls?: boolean;
3085
3342
  convertDataPart?: (part: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => TextPart | FilePart | undefined;
3086
- }): ModelMessage[];
3087
- /**
3088
- @deprecated Use `convertToModelMessages` instead.
3089
- */
3090
- declare const convertToCoreMessages: typeof convertToModelMessages;
3343
+ }): Promise<ModelMessage[]>;
3091
3344
 
3092
3345
  type PrepareSendMessagesRequest<UI_MESSAGE extends UIMessage> = (options: {
3093
3346
  id: string;
@@ -3415,38 +3668,45 @@ declare const UI_MESSAGE_STREAM_HEADERS: {
3415
3668
  * Runs the agent and stream the output as a UI message stream.
3416
3669
  *
3417
3670
  * @param agent - The agent to run.
3418
- * @param messages - The input UI messages.
3671
+ * @param uiMessages - The input UI messages.
3672
+ * @param abortSignal - The abort signal. Optional.
3673
+ * @param options - The options for the agent.
3674
+ * @param experimental_transform - The stream transformations. Optional.
3419
3675
  *
3420
3676
  * @returns The UI message stream.
3421
3677
  */
3422
- declare function createAgentUIStream<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ agent, messages, options, ...uiMessageStreamOptions }: {
3678
+ declare function createAgentUIStream<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ agent, uiMessages, options, abortSignal, experimental_transform, ...uiMessageStreamOptions }: {
3423
3679
  agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
3424
- messages: unknown[];
3680
+ uiMessages: unknown[];
3681
+ abortSignal?: AbortSignal;
3425
3682
  options?: CALL_OPTIONS;
3683
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
3426
3684
  } & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>>>;
3427
3685
 
3428
3686
  /**
3429
3687
  * Pipes the agent UI message stream to a Node.js ServerResponse object.
3430
3688
  *
3431
3689
  * @param agent - The agent to run.
3432
- * @param messages - The input UI messages.
3690
+ * @param uiMessages - The input UI messages.
3433
3691
  */
3434
3692
  declare function pipeAgentUIStreamToResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ response, headers, status, statusText, consumeSseStream, ...options }: {
3435
3693
  response: ServerResponse;
3436
3694
  agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
3437
- messages: unknown[];
3695
+ uiMessages: unknown[];
3696
+ abortSignal?: AbortSignal;
3438
3697
  options?: CALL_OPTIONS;
3698
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
3439
3699
  } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<void>;
3440
3700
 
3441
3701
  /**
3442
3702
  The result of an `embed` call.
3443
3703
  It contains the embedding, the value, and additional information.
3444
3704
  */
3445
- interface EmbedResult<VALUE> {
3705
+ interface EmbedResult {
3446
3706
  /**
3447
3707
  The value that was embedded.
3448
3708
  */
3449
- readonly value: VALUE;
3709
+ readonly value: string;
3450
3710
  /**
3451
3711
  The embedding of the value.
3452
3712
  */
@@ -3456,6 +3716,10 @@ interface EmbedResult<VALUE> {
3456
3716
  */
3457
3717
  readonly usage: EmbeddingModelUsage;
3458
3718
  /**
3719
+ Warnings for the call, e.g. unsupported settings.
3720
+ */
3721
+ readonly warnings: Array<Warning>;
3722
+ /**
3459
3723
  Optional provider-specific metadata.
3460
3724
  */
3461
3725
  readonly providerMetadata?: ProviderMetadata;
@@ -3486,15 +3750,15 @@ Embed a value using an embedding model. The type of the value is defined by the
3486
3750
 
3487
3751
  @returns A result object that contains the embedding, the value, and additional information.
3488
3752
  */
3489
- declare function embed<VALUE = string>({ model: modelArg, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
3753
+ declare function embed({ model: modelArg, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
3490
3754
  /**
3491
3755
  The embedding model to use.
3492
3756
  */
3493
- model: EmbeddingModel<VALUE>;
3757
+ model: EmbeddingModel;
3494
3758
  /**
3495
3759
  The value that should be embedded.
3496
3760
  */
3497
- value: VALUE;
3761
+ value: string;
3498
3762
  /**
3499
3763
  Maximum number of retries per embedding model call. Set to 0 to disable retries.
3500
3764
 
@@ -3520,17 +3784,17 @@ declare function embed<VALUE = string>({ model: modelArg, value, providerOptions
3520
3784
  * Optional telemetry configuration (experimental).
3521
3785
  */
3522
3786
  experimental_telemetry?: TelemetrySettings;
3523
- }): Promise<EmbedResult<VALUE>>;
3787
+ }): Promise<EmbedResult>;
3524
3788
 
3525
3789
  /**
3526
3790
  The result of a `embedMany` call.
3527
3791
  It contains the embeddings, the values, and additional information.
3528
3792
  */
3529
- interface EmbedManyResult<VALUE> {
3793
+ interface EmbedManyResult {
3530
3794
  /**
3531
3795
  The values that were embedded.
3532
3796
  */
3533
- readonly values: Array<VALUE>;
3797
+ readonly values: Array<string>;
3534
3798
  /**
3535
3799
  The embeddings. They are in the same order as the values.
3536
3800
  */
@@ -3540,6 +3804,10 @@ interface EmbedManyResult<VALUE> {
3540
3804
  */
3541
3805
  readonly usage: EmbeddingModelUsage;
3542
3806
  /**
3807
+ Warnings for the call, e.g. unsupported settings.
3808
+ */
3809
+ readonly warnings: Array<Warning>;
3810
+ /**
3543
3811
  Optional provider-specific metadata.
3544
3812
  */
3545
3813
  readonly providerMetadata?: ProviderMetadata;
@@ -3574,15 +3842,15 @@ has a limit on how many embeddings can be generated in a single call.
3574
3842
 
3575
3843
  @returns A result object that contains the embeddings, the value, and additional information.
3576
3844
  */
3577
- declare function embedMany<VALUE = string>({ model: modelArg, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
3845
+ declare function embedMany({ model: modelArg, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
3578
3846
  /**
3579
3847
  The embedding model to use.
3580
3848
  */
3581
- model: EmbeddingModel<VALUE>;
3849
+ model: EmbeddingModel;
3582
3850
  /**
3583
3851
  The values that should be embedded.
3584
3852
  */
3585
- values: Array<VALUE>;
3853
+ values: Array<string>;
3586
3854
  /**
3587
3855
  Maximum number of retries per embedding model call. Set to 0 to disable retries.
3588
3856
 
@@ -3614,11 +3882,11 @@ declare function embedMany<VALUE = string>({ model: modelArg, values, maxParalle
3614
3882
  * @default Infinity
3615
3883
  */
3616
3884
  maxParallelCalls?: number;
3617
- }): Promise<EmbedManyResult<VALUE>>;
3885
+ }): Promise<EmbedManyResult>;
3618
3886
 
3619
- declare const symbol$b: unique symbol;
3887
+ declare const symbol$a: unique symbol;
3620
3888
  declare class InvalidArgumentError extends AISDKError {
3621
- private readonly [symbol$b];
3889
+ private readonly [symbol$a];
3622
3890
  readonly parameter: string;
3623
3891
  readonly value: unknown;
3624
3892
  constructor({ parameter, value, message, }: {
@@ -3687,7 +3955,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
3687
3955
  file: GeneratedFile;
3688
3956
  } | {
3689
3957
  type: 'stream-start';
3690
- warnings: LanguageModelV3CallWarning[];
3958
+ warnings: SharedV3Warning[];
3691
3959
  } | {
3692
3960
  type: 'response-metadata';
3693
3961
  id?: string;
@@ -3696,6 +3964,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
3696
3964
  } | {
3697
3965
  type: 'finish';
3698
3966
  finishReason: FinishReason;
3967
+ rawFinishReason: string | undefined;
3699
3968
  usage: LanguageModelUsage;
3700
3969
  providerMetadata?: ProviderMetadata;
3701
3970
  } | {
@@ -3706,9 +3975,9 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
3706
3975
  rawValue: unknown;
3707
3976
  };
3708
3977
 
3709
- declare const symbol$a: unique symbol;
3978
+ declare const symbol$9: unique symbol;
3710
3979
  declare class InvalidStreamPartError extends AISDKError {
3711
- private readonly [symbol$a];
3980
+ private readonly [symbol$9];
3712
3981
  readonly chunk: SingleRequestTextStreamPart<any>;
3713
3982
  constructor({ chunk, message, }: {
3714
3983
  chunk: SingleRequestTextStreamPart<any>;
@@ -3717,7 +3986,7 @@ declare class InvalidStreamPartError extends AISDKError {
3717
3986
  static isInstance(error: unknown): error is InvalidStreamPartError;
3718
3987
  }
3719
3988
 
3720
- declare const symbol$9: unique symbol;
3989
+ declare const symbol$8: unique symbol;
3721
3990
  /**
3722
3991
  Thrown when no image could be generated. This can have multiple causes:
3723
3992
 
@@ -3725,7 +3994,7 @@ Thrown when no image could be generated. This can have multiple causes:
3725
3994
  - The model generated a response that could not be parsed.
3726
3995
  */
3727
3996
  declare class NoImageGeneratedError extends AISDKError {
3728
- private readonly [symbol$9];
3997
+ private readonly [symbol$8];
3729
3998
  /**
3730
3999
  The response metadata for each call.
3731
4000
  */
@@ -3738,7 +4007,7 @@ declare class NoImageGeneratedError extends AISDKError {
3738
4007
  static isInstance(error: unknown): error is NoImageGeneratedError;
3739
4008
  }
3740
4009
 
3741
- declare const symbol$8: unique symbol;
4010
+ declare const symbol$7: unique symbol;
3742
4011
  /**
3743
4012
  Thrown when no object could be generated. This can have several causes:
3744
4013
 
@@ -3751,7 +4020,7 @@ The error contains the following properties:
3751
4020
  - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
3752
4021
  */
3753
4022
  declare class NoObjectGeneratedError extends AISDKError {
3754
- private readonly [symbol$8];
4023
+ private readonly [symbol$7];
3755
4024
  /**
3756
4025
  The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
3757
4026
  */
@@ -3779,12 +4048,12 @@ declare class NoObjectGeneratedError extends AISDKError {
3779
4048
  static isInstance(error: unknown): error is NoObjectGeneratedError;
3780
4049
  }
3781
4050
 
3782
- declare const symbol$7: unique symbol;
4051
+ declare const symbol$6: unique symbol;
3783
4052
  /**
3784
4053
  Thrown when no LLM output was generated, e.g. because of errors.
3785
4054
  */
3786
4055
  declare class NoOutputGeneratedError extends AISDKError {
3787
- private readonly [symbol$7];
4056
+ private readonly [symbol$6];
3788
4057
  constructor({ message, cause, }?: {
3789
4058
  message?: string;
3790
4059
  cause?: Error;
@@ -3802,9 +4071,9 @@ declare class NoSpeechGeneratedError extends AISDKError {
3802
4071
  });
3803
4072
  }
3804
4073
 
3805
- declare const symbol$6: unique symbol;
4074
+ declare const symbol$5: unique symbol;
3806
4075
  declare class ToolCallRepairError extends AISDKError {
3807
- private readonly [symbol$6];
4076
+ private readonly [symbol$5];
3808
4077
  readonly originalError: NoSuchToolError | InvalidToolInputError;
3809
4078
  constructor({ cause, originalError, message, }: {
3810
4079
  message?: string;
@@ -3828,9 +4097,9 @@ declare class UnsupportedModelVersionError extends AISDKError {
3828
4097
  });
3829
4098
  }
3830
4099
 
3831
- declare const symbol$5: unique symbol;
4100
+ declare const symbol$4: unique symbol;
3832
4101
  declare class InvalidDataContentError extends AISDKError {
3833
- private readonly [symbol$5];
4102
+ private readonly [symbol$4];
3834
4103
  readonly content: unknown;
3835
4104
  constructor({ content, cause, message, }: {
3836
4105
  content: unknown;
@@ -3840,9 +4109,9 @@ declare class InvalidDataContentError extends AISDKError {
3840
4109
  static isInstance(error: unknown): error is InvalidDataContentError;
3841
4110
  }
3842
4111
 
3843
- declare const symbol$4: unique symbol;
4112
+ declare const symbol$3: unique symbol;
3844
4113
  declare class InvalidMessageRoleError extends AISDKError {
3845
- private readonly [symbol$4];
4114
+ private readonly [symbol$3];
3846
4115
  readonly role: string;
3847
4116
  constructor({ role, message, }: {
3848
4117
  role: string;
@@ -3851,9 +4120,9 @@ declare class InvalidMessageRoleError extends AISDKError {
3851
4120
  static isInstance(error: unknown): error is InvalidMessageRoleError;
3852
4121
  }
3853
4122
 
3854
- declare const symbol$3: unique symbol;
4123
+ declare const symbol$2: unique symbol;
3855
4124
  declare class MessageConversionError extends AISDKError {
3856
- private readonly [symbol$3];
4125
+ private readonly [symbol$2];
3857
4126
  readonly originalMessage: Omit<UIMessage, 'id'>;
3858
4127
  constructor({ originalMessage, message, }: {
3859
4128
  originalMessage: Omit<UIMessage, 'id'>;
@@ -3862,22 +4131,6 @@ declare class MessageConversionError extends AISDKError {
3862
4131
  static isInstance(error: unknown): error is MessageConversionError;
3863
4132
  }
3864
4133
 
3865
- declare const symbol$2: unique symbol;
3866
- declare class DownloadError extends AISDKError {
3867
- private readonly [symbol$2];
3868
- readonly url: string;
3869
- readonly statusCode?: number;
3870
- readonly statusText?: string;
3871
- constructor({ url, statusCode, statusText, cause, message, }: {
3872
- url: string;
3873
- statusCode?: number;
3874
- statusText?: string;
3875
- message?: string;
3876
- cause?: unknown;
3877
- });
3878
- static isInstance(error: unknown): error is DownloadError;
3879
- }
3880
-
3881
4134
  declare const symbol$1: unique symbol;
3882
4135
  type RetryErrorReason = 'maxRetriesExceeded' | 'errorNotRetryable' | 'abort';
3883
4136
  declare class RetryError extends AISDKError {
@@ -3909,7 +4162,7 @@ interface GenerateImageResult {
3909
4162
  /**
3910
4163
  Warnings for the call, e.g. unsupported settings.
3911
4164
  */
3912
- readonly warnings: Array<ImageGenerationWarning>;
4165
+ readonly warnings: Array<Warning>;
3913
4166
  /**
3914
4167
  Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
3915
4168
  */
@@ -3925,6 +4178,11 @@ interface GenerateImageResult {
3925
4178
  readonly usage: ImageModelUsage;
3926
4179
  }
3927
4180
 
4181
+ type GenerateImagePrompt = string | {
4182
+ images: Array<DataContent>;
4183
+ text?: string;
4184
+ mask?: DataContent;
4185
+ };
3928
4186
  /**
3929
4187
  Generates images using an image model.
3930
4188
 
@@ -3942,15 +4200,15 @@ as body parameters.
3942
4200
 
3943
4201
  @returns A result object that contains the generated images.
3944
4202
  */
3945
- declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4203
+ declare function generateImage({ model: modelArg, prompt: promptArg, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
3946
4204
  /**
3947
4205
  The image model to use.
3948
4206
  */
3949
- model: ImageModelV3;
4207
+ model: ImageModel;
3950
4208
  /**
3951
4209
  The prompt that should be used to generate the image.
3952
4210
  */
3953
- prompt: string;
4211
+ prompt: GenerateImagePrompt;
3954
4212
  /**
3955
4213
  Number of images to generate.
3956
4214
  */
@@ -4003,6 +4261,16 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
4003
4261
  headers?: Record<string, string>;
4004
4262
  }): Promise<GenerateImageResult>;
4005
4263
 
4264
+ /**
4265
+ * @deprecated Use `generateImage` instead.
4266
+ */
4267
+ declare const experimental_generateImage: typeof generateImage;
4268
+
4269
+ /**
4270
+ * @deprecated Use `GenerateImageResult` instead.
4271
+ */
4272
+ type Experimental_GenerateImageResult = GenerateImageResult;
4273
+
4006
4274
  /**
4007
4275
  The result of a `generateObject` call.
4008
4276
  */
@@ -4128,13 +4396,14 @@ functionality that can be fully encapsulated in the provider.
4128
4396
 
4129
4397
  @returns
4130
4398
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
4399
+
4400
+ @deprecated Use `generateText` with an `output` setting instead.
4131
4401
  */
4132
4402
  declare function generateObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
4133
4403
  /**
4134
4404
  The enum values that the model should use.
4135
4405
  */
4136
4406
  enum: Array<RESULT>;
4137
- mode?: 'json';
4138
4407
  output: 'enum';
4139
4408
  } : OUTPUT extends 'no-schema' ? {} : {
4140
4409
  /**
@@ -4153,20 +4422,6 @@ Used by some providers for additional LLM guidance, e.g.
4153
4422
  via tool or schema description.
4154
4423
  */
4155
4424
  schemaDescription?: string;
4156
- /**
4157
- The mode to use for object generation.
4158
-
4159
- The schema is converted into a JSON schema and used in one of the following ways
4160
-
4161
- - 'auto': The provider will choose the best mode for the model.
4162
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
4163
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
4164
-
4165
- Please note that most providers do not support all modes.
4166
-
4167
- Default and recommended: 'auto' (best mode for the model).
4168
- */
4169
- mode?: 'auto' | 'json' | 'tool';
4170
4425
  }) & {
4171
4426
  output?: OUTPUT;
4172
4427
  /**
@@ -4472,13 +4727,14 @@ functionality that can be fully encapsulated in the provider.
4472
4727
 
4473
4728
  @returns
4474
4729
  A result object for accessing the partial object stream and additional information.
4730
+
4731
+ @deprecated Use `streamText` with an `output` setting instead.
4475
4732
  */
4476
4733
  declare function streamObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
4477
4734
  /**
4478
4735
  The enum values that the model should use.
4479
4736
  */
4480
4737
  enum: Array<RESULT>;
4481
- mode?: 'json';
4482
4738
  output: 'enum';
4483
4739
  } : OUTPUT extends 'no-schema' ? {} : {
4484
4740
  /**
@@ -4497,20 +4753,6 @@ Used by some providers for additional LLM guidance, e.g.
4497
4753
  via tool or schema description.
4498
4754
  */
4499
4755
  schemaDescription?: string;
4500
- /**
4501
- The mode to use for object generation.
4502
-
4503
- The schema is converted into a JSON schema and used in one of the following ways
4504
-
4505
- - 'auto': The provider will choose the best mode for the model.
4506
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
4507
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
4508
-
4509
- Please note that most providers do not support all modes.
4510
-
4511
- Default and recommended: 'auto' (best mode for the model).
4512
- */
4513
- mode?: 'auto' | 'json' | 'tool';
4514
4756
  }) & {
4515
4757
  output?: OUTPUT;
4516
4758
  /**
@@ -4580,7 +4822,7 @@ interface SpeechResult {
4580
4822
  /**
4581
4823
  Warnings for the call, e.g. unsupported settings.
4582
4824
  */
4583
- readonly warnings: Array<SpeechWarning>;
4825
+ readonly warnings: Array<Warning>;
4584
4826
  /**
4585
4827
  Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4586
4828
  */
@@ -4668,10 +4910,30 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
4668
4910
  headers?: Record<string, string>;
4669
4911
  }): Promise<SpeechResult>;
4670
4912
 
4671
- type Warning = LanguageModelV3CallWarning | ImageModelV3CallWarning | SpeechModelV3CallWarning | TranscriptionModelV3CallWarning | SharedV3Warning;
4913
+ /**
4914
+ * A function for logging warnings.
4915
+ *
4916
+ * You can assign it to the `AI_SDK_LOG_WARNINGS` global variable to use it as the default warning logger.
4917
+ *
4918
+ * @example
4919
+ * ```ts
4920
+ * globalThis.AI_SDK_LOG_WARNINGS = (options) => {
4921
+ * console.log('WARNINGS:', options.warnings, options.provider, options.model);
4922
+ * };
4923
+ * ```
4924
+ */
4672
4925
  type LogWarningsFunction = (options: {
4926
+ /**
4927
+ * The warnings returned by the model provider.
4928
+ */
4673
4929
  warnings: Warning[];
4930
+ /**
4931
+ * The provider id used for the call.
4932
+ */
4674
4933
  provider: string;
4934
+ /**
4935
+ * The model id used for the call.
4936
+ */
4675
4937
  model: string;
4676
4938
  }) => void;
4677
4939
 
@@ -4680,8 +4942,8 @@ type LogWarningsFunction = (options: {
4680
4942
  */
4681
4943
  declare function defaultEmbeddingSettingsMiddleware({ settings, }: {
4682
4944
  settings: Partial<{
4683
- headers?: EmbeddingModelCallOptions<string>['headers'];
4684
- providerOptions?: EmbeddingModelCallOptions<string>['providerOptions'];
4945
+ headers?: EmbeddingModelV3CallOptions['headers'];
4946
+ providerOptions?: EmbeddingModelV3CallOptions['providerOptions'];
4685
4947
  }>;
4686
4948
  }): EmbeddingModelMiddleware;
4687
4949
 
@@ -4725,6 +4987,49 @@ declare function extractReasoningMiddleware({ tagName, separator, startWithReaso
4725
4987
  */
4726
4988
  declare function simulateStreamingMiddleware(): LanguageModelMiddleware;
4727
4989
 
4990
+ /**
4991
+ * Middleware that appends input examples to tool descriptions.
4992
+ *
4993
+ * This is useful for providers that don't natively support the `inputExamples`
4994
+ * property. The middleware serializes examples into the tool's description text.
4995
+ *
4996
+ * @param options - Configuration options for the middleware.
4997
+ * @param options.prefix - A prefix to prepend before the examples. Default: 'Input Examples:'
4998
+ * @param options.format - Optional custom formatter for each example.
4999
+ * Receives the example object and its index. Default: JSON.stringify(example.input)
5000
+ * @param options.remove - Whether to remove the inputExamples property
5001
+ * after adding them to the description. Default: true
5002
+ *
5003
+ * @example
5004
+ * ```ts
5005
+ * import { wrapLanguageModel, addToolInputExamplesMiddleware } from 'ai';
5006
+ *
5007
+ * const model = wrapLanguageModel({
5008
+ * model: yourModel,
5009
+ * middleware: addToolInputExamplesMiddleware(),
5010
+ * });
5011
+ * ```
5012
+ */
5013
+ declare function addToolInputExamplesMiddleware({ prefix, format, remove, }?: {
5014
+ /**
5015
+ * A prefix to prepend before the examples.
5016
+ */
5017
+ prefix?: string;
5018
+ /**
5019
+ * Optional custom formatter for each example.
5020
+ * Receives the example object and its index.
5021
+ * Default: JSON.stringify(example.input)
5022
+ */
5023
+ format?: (example: {
5024
+ input: JSONObject;
5025
+ }, index: number) => string;
5026
+ /**
5027
+ * Whether to remove the inputExamples property after adding them to the description.
5028
+ * Default: true
5029
+ */
5030
+ remove?: boolean;
5031
+ }): LanguageModelMiddleware;
5032
+
4728
5033
  /**
4729
5034
  * Wraps a LanguageModelV3 instance with middleware functionality.
4730
5035
  * This function allows you to apply middleware to transform parameters,
@@ -4757,11 +5062,11 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
4757
5062
  * @returns A new EmbeddingModelV3 instance with middleware applied.
4758
5063
  */
4759
5064
  declare const wrapEmbeddingModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
4760
- model: EmbeddingModelV3<string>;
5065
+ model: EmbeddingModelV3;
4761
5066
  middleware: EmbeddingModelMiddleware | EmbeddingModelMiddleware[];
4762
5067
  modelId?: string;
4763
5068
  providerId?: string;
4764
- }) => EmbeddingModelV3<string>;
5069
+ }) => EmbeddingModelV3;
4765
5070
 
4766
5071
  /**
4767
5072
  * Wraps a ProviderV3 instance with middleware functionality.
@@ -4784,19 +5089,19 @@ declare function wrapProvider({ provider, languageModelMiddleware, }: {
4784
5089
  *
4785
5090
  * @param {Object} options - The options for creating the custom provider.
4786
5091
  * @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
4787
- * @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
5092
+ * @param {Record<string, EmbeddingModel>} [options.embeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel instances.
4788
5093
  * @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
4789
5094
  * @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
4790
5095
  * @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
4791
- * @param {Record<string, RerankingModel<string>>} [options.rerankingModels] - A record of reranking models, where keys are model IDs and values are RerankingModel<string> instances.
5096
+ * @param {Record<string, RerankingModel>} [options.rerankingModels] - A record of reranking models, where keys are model IDs and values are RerankingModel instances.
4792
5097
  * @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
4793
- * @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
5098
+ * @returns {Provider} A Provider object with languageModel, embeddingModel, imageModel, transcriptionModel, and speechModel methods.
4794
5099
  *
4795
5100
  * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
4796
5101
  */
4797
- declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV3>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3<string>>, IMAGE_MODELS extends Record<string, ImageModelV3>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV3>, SPEECH_MODELS extends Record<string, SpeechModelV3>, RERANKING_MODELS extends Record<string, RerankingModelV3>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, rerankingModels, fallbackProvider: fallbackProviderArg, }: {
5102
+ declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV3>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3>, IMAGE_MODELS extends Record<string, ImageModelV3>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV3>, SPEECH_MODELS extends Record<string, SpeechModelV3>, RERANKING_MODELS extends Record<string, RerankingModelV3>>({ languageModels, embeddingModels, imageModels, transcriptionModels, speechModels, rerankingModels, fallbackProvider: fallbackProviderArg, }: {
4798
5103
  languageModels?: LANGUAGE_MODELS;
4799
- textEmbeddingModels?: EMBEDDING_MODELS;
5104
+ embeddingModels?: EMBEDDING_MODELS;
4800
5105
  imageModels?: IMAGE_MODELS;
4801
5106
  transcriptionModels?: TRANSCRIPTION_MODELS;
4802
5107
  speechModels?: SPEECH_MODELS;
@@ -4804,7 +5109,7 @@ declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageM
4804
5109
  fallbackProvider?: ProviderV3 | ProviderV2;
4805
5110
  }): ProviderV3 & {
4806
5111
  languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV3;
4807
- textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3<string>;
5112
+ embeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3;
4808
5113
  imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV3;
4809
5114
  transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV3;
4810
5115
  rerankingModel(modelId: ExtractModelId<RERANKING_MODELS>): RerankingModelV3;
@@ -4823,7 +5128,7 @@ declare class NoSuchProviderError extends NoSuchModelError {
4823
5128
  readonly availableProviders: string[];
4824
5129
  constructor({ modelId, modelType, providerId, availableProviders, message, }: {
4825
5130
  modelId: string;
4826
- modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel' | 'rerankingModel';
5131
+ modelType: 'languageModel' | 'embeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel' | 'rerankingModel';
4827
5132
  providerId: string;
4828
5133
  availableProviders: string[];
4829
5134
  message?: string;
@@ -4835,8 +5140,8 @@ type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T :
4835
5140
  interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV3> = Record<string, ProviderV3>, SEPARATOR extends string = ':'> {
4836
5141
  languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV3;
4837
5142
  languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV3;
4838
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV3<string>;
4839
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3<string>;
5143
+ embeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['embeddingModel']>>[0]>}` : never): EmbeddingModelV3;
5144
+ embeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3;
4840
5145
  imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV3;
4841
5146
  imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV3;
4842
5147
  transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV3;
@@ -5035,7 +5340,7 @@ interface TranscriptionResult {
5035
5340
  /**
5036
5341
  Warnings for the call, e.g. unsupported settings.
5037
5342
  */
5038
- readonly warnings: Array<TranscriptionWarning>;
5343
+ readonly warnings: Array<Warning>;
5039
5344
  /**
5040
5345
  Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
5041
5346
  */
@@ -5120,4 +5425,4 @@ declare global {
5120
5425
  var AI_SDK_LOG_WARNINGS: LogWarningsFunction | undefined | false;
5121
5426
  }
5122
5427
 
5123
- export { AbstractChat, Agent, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelMiddleware, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImageModelUsage, InferAgentUIMessage, InferCompleteOutput as InferGenerateOutput, InferPartialOutput as InferStreamOutput, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MessageConversionError, 
NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RerankResult, RerankingModel, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createAgentUIStream, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, 
createUIMessageStreamResponse, customProvider, defaultEmbeddingSettingsMiddleware, defaultSettingsMiddleware, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDataUIPart, isDeepEqualData, isFileUIPart, isReasoningUIPart, isTextUIPart, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeAgentUIStreamToResponse, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, rerank, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapEmbeddingModel, wrapLanguageModel, wrapProvider };
5428
+ export { AbstractChat, Agent, AgentCallParameters, AgentStreamParameters, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, ContentPart, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelMiddleware, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateImageResult, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageModelProviderMetadata, ImageModelResponseMetadata, ImageModelUsage, InferAgentUIMessage, InferCompleteOutput as InferGenerateOutput, InferPartialOutput as InferStreamOutput, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LogWarningsFunction, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoSpeechGeneratedError, NoSuchProviderError, 
NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RerankResult, RerankingModel, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, Warning, addToolInputExamplesMiddleware, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToModelMessages, cosineSimilarity, createAgentUIStream, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultEmbeddingSettingsMiddleware, defaultSettingsMiddleware, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_generateImage, generateSpeech as 
experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateImage, generateObject, generateText, getStaticToolName, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDataUIPart, isDeepEqualData, isFileUIPart, isReasoningUIPart, isStaticToolUIPart, isTextUIPart, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeAgentUIStreamToResponse, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, rerank, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapEmbeddingModel, wrapLanguageModel, wrapProvider };