ai 5.1.0-beta.8 → 6.0.0-beta.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +181 -0
- package/README.md +128 -44
- package/dist/index.d.mts +559 -388
- package/dist/index.d.ts +559 -388
- package/dist/index.js +985 -491
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +850 -362
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -4
- package/dist/internal/index.d.ts +4 -4
- package/dist/internal/index.js +42 -14
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +42 -14
- package/dist/internal/index.mjs.map +1 -1
- package/dist/test/index.d.mts +51 -51
- package/dist/test/index.d.ts +51 -51
- package/dist/test/index.js +25 -25
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +20 -20
- package/dist/test/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.d.ts
CHANGED
@@ -1,164 +1,20 @@
  export { createGateway, gateway } from '@ai-sdk/gateway';
- import {
- export { AssistantContent, AssistantModelMessage, DataContent, FilePart, IdGenerator, ImagePart, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
- import { AttributeValue, Tracer } from '@opentelemetry/api';
+ import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, ModelMessage, Schema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, InferSchema, FlexibleSchema, DataContent, Validator, StandardSchemaV1, Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
+ export { AssistantContent, AssistantModelMessage, DataContent, FilePart, IdGenerator, ImagePart, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
- import { EmbeddingModelV3, EmbeddingModelV3Embedding,
+ import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV3CallWarning, LanguageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV3, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
- import
+ import { ServerResponse } from 'node:http';
  import * as z4 from 'zod/v4';
  import { z } from 'zod/v4';
- import
+ import * as z3 from 'zod/v3';
+ import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { ServerResponse as ServerResponse$1 } from 'http';

- type CallSettings = {
- /**
- Maximum number of tokens to generate.
- */
- maxOutputTokens?: number;
- /**
- Temperature setting. The range depends on the provider and model.
-
- It is recommended to set either `temperature` or `topP`, but not both.
- */
- temperature?: number;
- /**
- Nucleus sampling. This is a number between 0 and 1.
-
- E.g. 0.1 would mean that only tokens with the top 10% probability mass
- are considered.
-
- It is recommended to set either `temperature` or `topP`, but not both.
- */
- topP?: number;
- /**
- Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
- */
- topK?: number;
- /**
- Presence penalty setting. It affects the likelihood of the model to
- repeat information that is already in the prompt.
-
- The presence penalty is a number between -1 (increase repetition)
- and 1 (maximum penalty, decrease repetition). 0 means no penalty.
- */
- presencePenalty?: number;
- /**
- Frequency penalty setting. It affects the likelihood of the model
- to repeatedly use the same words or phrases.
-
- The frequency penalty is a number between -1 (increase repetition)
- and 1 (maximum penalty, decrease repetition). 0 means no penalty.
- */
- frequencyPenalty?: number;
- /**
- Stop sequences.
- If set, the model will stop generating text when one of the stop sequences is generated.
- Providers may have limits on the number of stop sequences.
- */
- stopSequences?: string[];
- /**
- The seed (integer) to use for random sampling. If set and supported
- by the model, calls will generate deterministic results.
- */
- seed?: number;
- /**
- Maximum number of retries. Set to 0 to disable retries.
-
- @default 2
- */
- maxRetries?: number;
- /**
- Abort signal.
- */
- abortSignal?: AbortSignal;
- /**
- Additional HTTP headers to be sent with the request.
- Only applicable for HTTP-based providers.
- */
- headers?: Record<string, string | undefined>;
- };
-
- /**
- Prompt part of the AI function options.
- It contains a system message, a simple text prompt, or a list of messages.
- */
- type Prompt = {
- /**
- System message to include in the prompt. Can be used with `prompt` or `messages`.
- */
- system?: string;
- } & ({
- /**
- A prompt. It can be either a text prompt or a list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- prompt: string | Array<ModelMessage>;
- /**
- A list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- messages?: never;
- } | {
- /**
- A list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- messages: Array<ModelMessage>;
- /**
- A prompt. It can be either a text prompt or a list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- prompt?: never;
- });
-
- /**
- * Telemetry configuration.
- */
- type TelemetrySettings = {
- /**
- * Enable or disable telemetry. Disabled by default while experimental.
- */
- isEnabled?: boolean;
- /**
- * Enable or disable input recording. Enabled by default.
- *
- * You might want to disable input recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
- recordInputs?: boolean;
- /**
- * Enable or disable output recording. Enabled by default.
- *
- * You might want to disable output recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
- recordOutputs?: boolean;
- /**
- * Identifier for this function. Used to group telemetry data by function.
- */
- functionId?: string;
- /**
- * Additional information to include in the telemetry data.
- */
- metadata?: Record<string, AttributeValue>;
- /**
- * A custom tracer to use for the telemetry data.
- */
- tracer?: Tracer;
- };
-
  /**
  Embedding model that is used by the AI SDK Core functions.
  */
- type EmbeddingModel<VALUE = string> = string | EmbeddingModelV3<VALUE>;
+ type EmbeddingModel<VALUE = string> = string | EmbeddingModelV3<VALUE> | EmbeddingModelV2<VALUE>;
  /**
  Embedding.
  */
@@ -167,16 +23,16 @@ type Embedding = EmbeddingModelV3Embedding;
  /**
  Image model that is used by the AI SDK Core functions.
  */
- type ImageModel =
+ type ImageModel = ImageModelV3;
  /**
  Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
  */
- type ImageGenerationWarning =
+ type ImageGenerationWarning = ImageModelV3CallWarning;
  /**
  Metadata from the model provider for this call
  */
- type ImageModelProviderMetadata =
+ type ImageModelProviderMetadata = ImageModelV3ProviderMetadata;

  type ImageModelResponseMetadata = {
  /**
@@ -198,7 +54,7 @@ type JSONValue = JSONValue$1;
  /**
  Language model that is used by the AI SDK Core functions.
  */
- type LanguageModel = string | LanguageModelV2;
+ type LanguageModel = string | LanguageModelV3 | LanguageModelV2;
  /**
  Reason why a language model finished generating a response.

@@ -210,16 +66,16 @@ Can be one of the following:
  - `error`: model stopped because of an error
  - `other`: model stopped for other reasons
  */
- type FinishReason =
+ type FinishReason = LanguageModelV3FinishReason;
  /**
  Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
  */
- type CallWarning =
+ type CallWarning = LanguageModelV3CallWarning;
  /**
  A source that has been used as input to generate the response.
  */
- type Source =
+ type Source = LanguageModelV3Source;
  /**
  Tool choice for the generation. It supports the following settings:

@@ -233,7 +89,7 @@ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'requ
  toolName: Extract<keyof TOOLS, string>;
  };

- type LanguageModelMiddleware =
+ type LanguageModelMiddleware = LanguageModelV3Middleware;

  type LanguageModelRequestMetadata = {
  /**
@@ -304,17 +160,17 @@ Additional provider-specific metadata that is returned from the provider.
  This is needed to enable provider-specific functionality that can be
  fully encapsulated in the provider.
  */
- type ProviderMetadata =
+ type ProviderMetadata = SharedV3ProviderMetadata;

  /**
  Speech model that is used by the AI SDK Core functions.
  */
- type SpeechModel =
+ type SpeechModel = SpeechModelV3;
  /**
  Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
  */
- type SpeechWarning =
+ type SpeechWarning = SpeechModelV3CallWarning;

  type SpeechModelResponseMetadata = {
  /**
@@ -338,12 +194,12 @@ type SpeechModelResponseMetadata = {
  /**
  Transcription model that is used by the AI SDK Core functions.
  */
- type TranscriptionModel =
+ type TranscriptionModel = TranscriptionModelV3;
  /**
  Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
  */
- type TranscriptionWarning =
+ type TranscriptionWarning = TranscriptionModelV3CallWarning;

  type TranscriptionModelResponseMetadata = {
  /**
@@ -363,7 +219,7 @@ type TranscriptionModelResponseMetadata = {
  /**
  Represents the number of tokens used in a prompt and completion.
  */
- type LanguageModelUsage =
+ type LanguageModelUsage = LanguageModelV3Usage;
  /**
  Represents the number of tokens used in an embedding.
  */
@@ -374,31 +230,6 @@ type EmbeddingModelUsage = {
  tokens: number;
  };

- /**
- * Experimental. Can change in patch versions without warning.
- *
- * Download function. Called with the array of URLs and a boolean indicating
- * whether the URL is supported by the model.
- *
- * The download function can decide for each URL:
- * - to return null (which means that the URL should be passed to the model)
- * - to download the asset and return the data (incl. retries, authentication, etc.)
- *
- * Should throw DownloadError if the download fails.
- *
- * Should return an array of objects sorted by the order of the requested downloads.
- * For each object, the data should be a Uint8Array if the URL was downloaded.
- * For each object, the mediaType should be the media type of the downloaded asset.
- * For each object, the data should be null if the URL should be passed through as is.
- */
- type DownloadFunction = (options: Array<{
- url: URL;
- isUrlSupportedByModel: boolean;
- }>) => PromiseLike<Array<{
- data: Uint8Array;
- mediaType: string | undefined;
- } | null>>;
-
  /**
  * A generated file.
  */
@@ -419,23 +250,6 @@ interface GeneratedFile {
  readonly mediaType: string;
  }

- /**
- * Reasoning output of a text generation. It contains a reasoning.
- */
- interface ReasoningOutput {
- type: 'reasoning';
- /**
- * The reasoning text.
- */
- text: string;
- /**
- * Additional provider-specific metadata. They are passed through
- * to the provider from the AI SDK and enable provider-specific
- * functionality that can be fully encapsulated in the provider.
- */
- providerMetadata?: ProviderMetadata;
- }
-
  /**
  Create a union of the given object's values, and optionally specify which keys to get the values from.

@@ -480,27 +294,25 @@ type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType>

  type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta'>>;

+ type BaseToolCall = {
+ type: 'tool-call';
+ toolCallId: string;
+ providerExecuted?: boolean;
+ providerMetadata?: ProviderMetadata;
+ };
  type StaticToolCall<TOOLS extends ToolSet> = ValueOf<{
- [NAME in keyof TOOLS]: {
- type: 'tool-call';
- toolCallId: string;
+ [NAME in keyof TOOLS]: BaseToolCall & {
  toolName: NAME & string;
  input: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
- providerExecuted?: boolean;
  dynamic?: false | undefined;
  invalid?: false | undefined;
  error?: never;
- providerMetadata?: ProviderMetadata;
  };
  }>;
- type DynamicToolCall = {
- type: 'tool-call';
- toolCallId: string;
+ type DynamicToolCall = BaseToolCall & {
  toolName: string;
  input: unknown;
- providerExecuted?: boolean;
  dynamic: true;
- providerMetadata?: ProviderMetadata;
  /**
  * True if this is caused by an unparsable tool call or
  * a tool that does not exist.
@@ -513,6 +325,40 @@ type DynamicToolCall = {
  };
  type TypedToolCall<TOOLS extends ToolSet> = StaticToolCall<TOOLS> | DynamicToolCall;

+ /**
+ * Output part that indicates that a tool approval request has been made.
+ *
+ * The tool approval request can be approved or denied in the next tool message.
+ */
+ type ToolApprovalRequestOutput<TOOLS extends ToolSet> = {
+ type: 'tool-approval-request';
+ /**
+ * ID of the tool approval request.
+ */
+ approvalId: string;
+ /**
+ * Tool call that the approval request is for.
+ */
+ toolCall: TypedToolCall<TOOLS>;
+ };
+
+ /**
+ * Reasoning output of a text generation. It contains a reasoning.
+ */
+ interface ReasoningOutput {
+ type: 'reasoning';
+ /**
+ * The reasoning text.
+ */
+ text: string;
+ /**
+ * Additional provider-specific metadata. They are passed through
+ * to the provider from the AI SDK and enable provider-specific
+ * functionality that can be fully encapsulated in the provider.
+ */
+ providerMetadata?: ProviderMetadata;
+ }
+
  type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
  [NAME in keyof TOOLS]: {
  type: 'tool-error';
@@ -581,7 +427,7 @@ type ContentPart<TOOLS extends ToolSet> = {
  type: 'tool-error';
  } & TypedToolError<TOOLS> & {
  providerMetadata?: ProviderMetadata;
- })
+ }) | ToolApprovalRequestOutput<TOOLS>;

  /**
  A message that was generated during the generation process.
@@ -749,48 +595,217 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
  */
  readonly totalUsage: LanguageModelUsage;
  /**
- Warnings from the model provider (e.g. unsupported settings)
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ Additional request information.
+ */
+ readonly request: LanguageModelRequestMetadata;
+ /**
+ Additional response information.
+ */
+ readonly response: LanguageModelResponseMetadata & {
+ /**
+ The response messages that were generated during the call. It consists of an assistant message,
+ potentially containing tool calls.
+
+ When there are tool results, there is an additional tool message with the tool results that are available.
+ If there are tools that do not have execute functions, they are not included in the tool results and
+ need to be added separately.
+ */
+ messages: Array<ResponseMessage>;
+ /**
+ Response body (available only for providers that use HTTP requests).
+ */
+ body?: unknown;
+ };
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ readonly providerMetadata: ProviderMetadata | undefined;
+ /**
+ Details for all steps.
+ You can use this to get information about intermediate steps,
+ such as the tool calls or the response headers.
+ */
+ readonly steps: Array<StepResult<TOOLS>>;
+ /**
+ The generated structured output. It uses the `experimental_output` specification.
+ */
+ readonly experimental_output: OUTPUT;
+ }
+
+ type CallSettings = {
+ /**
+ Maximum number of tokens to generate.
+ */
+ maxOutputTokens?: number;
+ /**
+ Temperature setting. The range depends on the provider and model.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+ */
+ temperature?: number;
+ /**
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+ */
+ topP?: number;
+ /**
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+ */
+ topK?: number;
+ /**
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ */
+ presencePenalty?: number;
+ /**
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ */
+ frequencyPenalty?: number;
+ /**
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
+ */
+ stopSequences?: string[];
+ /**
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
+ */
+ seed?: number;
+ /**
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ /**
+ Additional HTTP headers to be sent with the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string | undefined>;
+ };
+
+ /**
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
+ */
+ type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
+ system?: string;
+ } & ({
+ /**
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt: string | Array<ModelMessage>;
+ /**
+ A list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ messages?: never;
+ } | {
+ /**
+ A list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ messages: Array<ModelMessage>;
+ /**
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: never;
+ });
+
+ /**
+ * Telemetry configuration.
+ */
+ type TelemetrySettings = {
+ /**
+ * Enable or disable telemetry. Disabled by default while experimental.
  */
-
+ isEnabled?: boolean;
  /**
-
+ * Enable or disable input recording. Enabled by default.
+ *
+ * You might want to disable input recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
  */
-
+ recordInputs?: boolean;
  /**
-
+ * Enable or disable output recording. Enabled by default.
+ *
+ * You might want to disable output recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
  */
-
- /**
- The response messages that were generated during the call. It consists of an assistant message,
- potentially containing tool calls.
-
- When there are tool results, there is an additional tool message with the tool results that are available.
- If there are tools that do not have execute functions, they are not included in the tool results and
- need to be added separately.
- */
- messages: Array<ResponseMessage>;
- /**
- Response body (available only for providers that use HTTP requests).
- */
- body?: unknown;
- };
+ recordOutputs?: boolean;
  /**
-
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
+ * Identifier for this function. Used to group telemetry data by function.
  */
-
+ functionId?: string;
  /**
-
- You can use this to get information about intermediate steps,
- such as the tool calls or the response headers.
+ * Additional information to include in the telemetry data.
  */
-
+ metadata?: Record<string, AttributeValue>;
  /**
-
+ * A custom tracer to use for the telemetry data.
  */
-
- }
+ tracer?: Tracer;
+ };
+
+ /**
+ * Experimental. Can change in patch versions without warning.
+ *
+ * Download function. Called with the array of URLs and a boolean indicating
+ * whether the URL is supported by the model.
+ *
+ * The download function can decide for each URL:
+ * - to return null (which means that the URL should be passed to the model)
+ * - to download the asset and return the data (incl. retries, authentication, etc.)
+ *
+ * Should throw DownloadError if the download fails.
+ *
+ * Should return an array of objects sorted by the order of the requested downloads.
+ * For each object, the data should be a Uint8Array if the URL was downloaded.
+ * For each object, the mediaType should be the media type of the downloaded asset.
+ * For each object, the data should be null if the URL should be passed through as is.
+ */
+ type DownloadFunction = (options: Array<{
+ url: URL;
+ isUrlSupportedByModel: boolean;
+ }>) => PromiseLike<Array<{
+ data: Uint8Array;
+ mediaType: string | undefined;
+ } | null>>;

  /**
  Create a type from an object with all keys and nested keys set to optional.
@@ -811,7 +826,7 @@ type PartialObject<ObjectType extends object> = {

  interface Output<OUTPUT, PARTIAL> {
  readonly type: 'object' | 'text';
- responseFormat:
+ responseFormat: LanguageModelV3CallOptions['responseFormat'];
  parsePartial(options: {
  text: string;
  }): Promise<{
@@ -961,13 +976,13 @@ declare const coreMessageSchema: z.ZodType<CoreMessage>;
  type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
  system: string | undefined;
  messages: ModelMessage[];
- toolCall:
+ toolCall: LanguageModelV3ToolCall;
  tools: TOOLS;
  inputSchema: (options: {
  toolName: string;
  }) => JSONSchema7;
  error: NoSuchToolError | InvalidToolInputError;
- }) => Promise<
+ }) => Promise<LanguageModelV3ToolCall | null>;

  /**
  Callback that is set using the `onStepFinish` option.
@@ -976,6 +991,21 @@ Callback that is set using the `onStepFinish` option.
  */
  type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
  /**
+ Callback that is set using the `onFinish` option.
+
+ @param event - The event that is passed to the callback.
+ */
+ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
+ /**
+ Details for all steps.
+ */
+ readonly steps: StepResult<TOOLS>[];
+ /**
+ Total usage for all steps. This is the sum of the usage of all steps.
+ */
+ readonly totalUsage: LanguageModelUsage;
+ }) => PromiseLike<void> | void;
+ /**
  Generate a text and call tools for a given prompt using a language model.

  This function does not stream the output. If you want to stream the output, use `streamText` instead.
@@ -1017,11 +1047,12 @@ If set and supported by the model, calls will generate deterministic results.
  @param experimental_generateMessageId - Generate a unique ID for each message.

  @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ @param onFinish - Callback that is called when all steps are finished and the response is complete.

  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId, currentDate, }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1083,9 +1114,13 @@ A function that attempts to repair a tool call that failed to parse.
  */
  experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
  /**
-
-
+ * Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
  onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
+ /**
+ * Callback that is called when all steps are finished and the response is complete.
+ */
+ onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
  /**
  * Context that is passed into tool execution.
  *
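For orientation, a minimal sketch of how the new `onFinish` option on `generateText` might be used, based only on the types shown above (the model id string is a placeholder; everything else follows the declared signatures):

    import { generateText } from 'ai';

    const result = await generateText({
      model: 'openai/gpt-4o', // placeholder model id, resolved via the default gateway
      prompt: 'Write a haiku about package diffs.',
      onStepFinish: step => {
        // existing per-step callback
        console.log('step finished, finishReason:', step.finishReason);
      },
      // new in ai 6: fires once after all steps are complete
      onFinish: event => {
        console.log('steps:', event.steps.length);
        console.log('total usage:', event.totalUsage);
      },
    });

    console.log(result.text);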
@@ -1229,13 +1264,10 @@ If set and supported by the model, calls will generate deterministic results.
|
|
|
1229
1264
|
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
|
1230
1265
|
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
|
1231
1266
|
|
|
1232
|
-
@param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
|
|
1233
|
-
|
|
1234
1267
|
@param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
|
|
1235
1268
|
@param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
|
|
1236
1269
|
@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
|
1237
|
-
@param onFinish - Callback that is called when
|
|
1238
|
-
(for tools that have an `execute` function) are finished.
|
|
1270
|
+
@param onFinish - Callback that is called when all steps are finished and the response is complete.
|
|
1239
1271
|
|
|
1240
1272
|
@return
|
|
1241
1273
|
A result object for accessing different stream types and additional information.
|
|
@@ -1359,6 +1391,23 @@ Internal. For test use only. May change without notice.
|
|
|
1359
1391
|
};
|
|
1360
1392
|
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
|
|
1361
1393
|
|
|
1394
|
+
/**
|
|
1395
|
+
* Tool output when the tool execution has been denied (for static tools).
|
|
1396
|
+
*/
|
|
1397
|
+
type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
|
|
1398
|
+
[NAME in keyof TOOLS]: {
|
|
1399
|
+
type: 'tool-output-denied';
|
|
1400
|
+
toolCallId: string;
|
|
1401
|
+
toolName: NAME & string;
|
|
1402
|
+
providerExecuted?: boolean;
|
|
1403
|
+
dynamic?: false | undefined;
|
|
1404
|
+
};
|
|
1405
|
+
}>;
|
|
1406
|
+
/**
|
|
1407
|
+
* Tool output when the tool execution has been denied.
|
|
1408
|
+
*/
|
|
1409
|
+
type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
|
|
1410
|
+
|
|
1362
1411
|
/**
|
|
1363
1412
|
The data types that can be used in the UI message for the UI message data parts.
|
|
1364
1413
|
*/
|
|
@@ -1519,6 +1568,7 @@ type UIToolInvocation<TOOL extends UITool | Tool> = {
|
|
|
1519
1568
|
providerExecuted?: boolean;
|
|
1520
1569
|
output?: never;
|
|
1521
1570
|
errorText?: never;
|
|
1571
|
+
approval?: never;
|
|
1522
1572
|
} | {
|
|
1523
1573
|
state: 'input-available';
|
|
1524
1574
|
input: asUITool<TOOL>['input'];
|
|
@@ -1526,6 +1576,31 @@ type UIToolInvocation<TOOL extends UITool | Tool> = {
|
|
|
1526
1576
|
output?: never;
|
|
1527
1577
|
errorText?: never;
|
|
1528
1578
|
callProviderMetadata?: ProviderMetadata;
|
|
1579
|
+
approval?: never;
|
|
1580
|
+
} | {
|
|
1581
|
+
state: 'approval-requested';
|
|
1582
|
+
input: asUITool<TOOL>['input'];
|
|
1583
|
+
providerExecuted?: boolean;
|
|
1584
|
+
output?: never;
|
|
1585
|
+
errorText?: never;
|
|
1586
|
+
callProviderMetadata?: ProviderMetadata;
|
|
1587
|
+
approval: {
|
|
1588
|
+
id: string;
|
|
1589
|
+
approved?: never;
|
|
1590
|
+
reason?: never;
|
|
1591
|
+
};
|
|
1592
|
+
} | {
|
|
1593
|
+
state: 'approval-responded';
|
|
1594
|
+
input: asUITool<TOOL>['input'];
|
|
1595
|
+
providerExecuted?: boolean;
|
|
1596
|
+
output?: never;
|
|
1597
|
+
errorText?: never;
|
|
1598
|
+
callProviderMetadata?: ProviderMetadata;
|
|
1599
|
+
approval: {
|
|
1600
|
+
id: string;
|
|
1601
|
+
approved: boolean;
|
|
1602
|
+
reason?: string;
|
|
1603
|
+
};
|
|
1529
1604
|
} | {
|
|
1530
1605
|
state: 'output-available';
|
|
1531
1606
|
input: asUITool<TOOL>['input'];
|
|
@@ -1534,6 +1609,11 @@ type UIToolInvocation<TOOL extends UITool | Tool> = {
|
|
|
1534
1609
|
providerExecuted?: boolean;
|
|
1535
1610
|
callProviderMetadata?: ProviderMetadata;
|
|
1536
1611
|
preliminary?: boolean;
|
|
1612
|
+
approval?: {
|
|
1613
|
+
id: string;
|
|
1614
|
+
approved: true;
|
|
1615
|
+
reason?: string;
|
|
1616
|
+
};
|
|
1537
1617
|
} | {
|
|
1538
1618
|
state: 'output-error';
|
|
1539
1619
|
input: asUITool<TOOL>['input'] | undefined;
|
|
@@ -1542,6 +1622,23 @@ type UIToolInvocation<TOOL extends UITool | Tool> = {
|
|
|
1542
1622
|
errorText: string;
|
|
1543
1623
|
providerExecuted?: boolean;
|
|
1544
1624
|
callProviderMetadata?: ProviderMetadata;
|
|
1625
|
+
approval?: {
|
|
1626
|
+
id: string;
|
|
1627
|
+
approved: true;
|
|
1628
|
+
reason?: string;
|
|
1629
|
+
};
|
|
1630
|
+
} | {
|
|
1631
|
+
state: 'output-denied';
|
|
1632
|
+
input: asUITool<TOOL>['input'];
|
|
1633
|
+
providerExecuted?: boolean;
|
|
1634
|
+
output?: never;
|
|
1635
|
+
errorText?: never;
|
|
1636
|
+
callProviderMetadata?: ProviderMetadata;
|
|
1637
|
+
approval: {
|
|
1638
|
+
id: string;
|
|
1639
|
+
approved: false;
|
|
1640
|
+
reason?: string;
|
|
1641
|
+
};
|
|
1545
1642
|
});
|
|
1546
1643
|
type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
|
|
1547
1644
|
[NAME in keyof TOOLS & string]: {
|
|
@@ -1597,16 +1694,16 @@ type InferUIMessageToolCall<UI_MESSAGE extends UIMessage> = ValueOf<{
|
|
|
1597
1694
|
declare const uiMessageChunkSchema: z.ZodUnion<readonly [z.ZodObject<{
|
|
1598
1695
|
type: z.ZodLiteral<"text-start">;
|
|
1599
1696
|
id: z.ZodString;
|
|
1600
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1697
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1601
1698
|
}, z.core.$strict>, z.ZodObject<{
|
|
1602
1699
|
type: z.ZodLiteral<"text-delta">;
|
|
1603
1700
|
id: z.ZodString;
|
|
1604
1701
|
delta: z.ZodString;
|
|
1605
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1702
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1606
1703
|
}, z.core.$strict>, z.ZodObject<{
|
|
1607
1704
|
type: z.ZodLiteral<"text-end">;
|
|
1608
1705
|
id: z.ZodString;
|
|
1609
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1706
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1610
1707
|
}, z.core.$strict>, z.ZodObject<{
|
|
1611
1708
|
type: z.ZodLiteral<"error">;
|
|
1612
1709
|
errorText: z.ZodString;
|
|
@@ -1626,7 +1723,7 @@ declare const uiMessageChunkSchema: z.ZodUnion<readonly [z.ZodObject<{
|
|
|
1626
1723
|
toolName: z.ZodString;
|
|
1627
1724
|
input: z.ZodUnknown;
|
|
1628
1725
|
providerExecuted: z.ZodOptional<z.ZodBoolean>;
|
|
1629
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1726
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1630
1727
|
dynamic: z.ZodOptional<z.ZodBoolean>;
|
|
1631
1728
|
}, z.core.$strict>, z.ZodObject<{
|
|
1632
1729
|
type: z.ZodLiteral<"tool-input-error">;
|
|
@@ -1634,9 +1731,13 @@ declare const uiMessageChunkSchema: z.ZodUnion<readonly [z.ZodObject<{
|
|
|
1634
1731
|
toolName: z.ZodString;
|
|
1635
1732
|
input: z.ZodUnknown;
|
|
1636
1733
|
providerExecuted: z.ZodOptional<z.ZodBoolean>;
|
|
1637
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1734
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1638
1735
|
dynamic: z.ZodOptional<z.ZodBoolean>;
|
|
1639
1736
|
errorText: z.ZodString;
|
|
1737
|
+
}, z.core.$strict>, z.ZodObject<{
|
|
1738
|
+
type: z.ZodLiteral<"tool-approval-request">;
|
|
1739
|
+
approvalId: z.ZodString;
|
|
1740
|
+
toolCallId: z.ZodString;
|
|
1640
1741
|
}, z.core.$strict>, z.ZodObject<{
|
|
1641
1742
|
type: z.ZodLiteral<"tool-output-available">;
|
|
1642
1743
|
toolCallId: z.ZodString;
|
|
@@ -1650,37 +1751,40 @@ declare const uiMessageChunkSchema: z.ZodUnion<readonly [z.ZodObject<{
|
|
|
1650
1751
|
errorText: z.ZodString;
|
|
1651
1752
|
providerExecuted: z.ZodOptional<z.ZodBoolean>;
|
|
1652
1753
|
dynamic: z.ZodOptional<z.ZodBoolean>;
|
|
1754
|
+
}, z.core.$strict>, z.ZodObject<{
|
|
1755
|
+
type: z.ZodLiteral<"tool-output-denied">;
|
|
1756
|
+
toolCallId: z.ZodString;
|
|
1653
1757
|
}, z.core.$strict>, z.ZodObject<{
|
|
1654
1758
|
type: z.ZodLiteral<"reasoning-start">;
|
|
1655
1759
|
id: z.ZodString;
|
|
1656
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1760
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1657
1761
|
}, z.core.$strict>, z.ZodObject<{
|
|
1658
1762
|
type: z.ZodLiteral<"reasoning-delta">;
|
|
1659
1763
|
id: z.ZodString;
|
|
1660
1764
|
delta: z.ZodString;
|
|
1661
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1765
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1662
1766
|
}, z.core.$strict>, z.ZodObject<{
|
|
1663
1767
|
type: z.ZodLiteral<"reasoning-end">;
|
|
1664
1768
|
id: z.ZodString;
|
|
1665
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1769
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1666
1770
|
}, z.core.$strict>, z.ZodObject<{
|
|
1667
1771
|
type: z.ZodLiteral<"source-url">;
|
|
1668
1772
|
sourceId: z.ZodString;
|
|
1669
1773
|
url: z.ZodString;
|
|
1670
1774
|
title: z.ZodOptional<z.ZodString>;
|
|
1671
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1775
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1672
1776
|
}, z.core.$strict>, z.ZodObject<{
|
|
1673
1777
|
type: z.ZodLiteral<"source-document">;
|
|
1674
1778
|
sourceId: z.ZodString;
|
|
1675
1779
|
mediaType: z.ZodString;
|
|
1676
1780
|
title: z.ZodString;
|
|
1677
1781
|
filename: z.ZodOptional<z.ZodString>;
|
|
1678
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1782
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1679
1783
|
}, z.core.$strict>, z.ZodObject<{
|
|
1680
1784
|
type: z.ZodLiteral<"file">;
|
|
1681
1785
|
url: z.ZodString;
|
|
1682
1786
|
mediaType: z.ZodString;
|
|
1683
|
-
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.
|
|
1787
|
+
providerMetadata: z.ZodOptional<z.ZodType<_ai_sdk_provider.SharedV3ProviderMetadata, unknown, z.core.$ZodTypeInternals<_ai_sdk_provider.SharedV3ProviderMetadata, unknown>>>;
|
|
1684
1788
|
}, z.core.$strict>, z.ZodObject<{
|
|
1685
1789
|
type: z.ZodCustom<`data-${string}`, `data-${string}`>;
|
|
1686
1790
|
id: z.ZodOptional<z.ZodString>;
|
|
@@ -1757,6 +1861,10 @@ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataT
|
|
|
1757
1861
|
providerMetadata?: ProviderMetadata;
|
|
1758
1862
|
dynamic?: boolean;
|
|
1759
1863
|
errorText: string;
|
|
1864
|
+
} | {
|
|
1865
|
+
type: 'tool-approval-request';
|
|
1866
|
+
approvalId: string;
|
|
1867
|
+
toolCallId: string;
|
|
1760
1868
|
} | {
|
|
1761
1869
|
type: 'tool-output-available';
|
|
1762
1870
|
toolCallId: string;
|
|
@@ -1770,6 +1878,9 @@ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataT
|
|
|
1770
1878
|
errorText: string;
|
|
1771
1879
|
providerExecuted?: boolean;
|
|
1772
1880
|
dynamic?: boolean;
|
|
1881
|
+
} | {
|
|
1882
|
+
type: 'tool-output-denied';
|
|
1883
|
+
toolCallId: string;
|
|
1773
1884
|
} | {
|
|
1774
1885
|
type: 'tool-input-start';
|
|
1775
1886
|
toolCallId: string;
|
|
@@ -2072,47 +2183,28 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
|
|
|
2072
2183
|
*/
|
|
2073
2184
|
consumeStream(options?: ConsumeStreamOptions): Promise<void>;
|
|
2074
2185
|
/**
|
|
2075
|
-
|
|
2076
|
-
|
|
2077
|
-
@param options.getErrorMessage an optional function that converts an error to an error message.
|
|
2078
|
-
@param options.sendUsage whether to send the usage information to the client. Defaults to true.
|
|
2079
|
-
@param options.sendReasoning whether to send the reasoning information to the client. Defaults to false.
|
|
2080
|
-
@param options.sendSources whether to send the sources information to the client. Defaults to false.
|
|
2081
|
-
@param options.experimental_sendFinish whether to send the finish information to the client. Defaults to true.
|
|
2082
|
-
@param options.experimental_sendStart whether to send the start information to the client. Defaults to true.
|
|
2186
|
+
Converts the result to a UI message stream.
|
|
2083
2187
|
|
|
2084
|
-
|
|
2188
|
+
@return A UI message stream.
|
|
2085
2189
|
*/
|
|
2086
2190
|
toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
|
|
2087
2191
|
/**
|
|
2088
|
-
|
|
2089
|
-
|
|
2090
|
-
@param options.status The status code.
|
|
2091
|
-
@param options.statusText The status text.
|
|
2092
|
-
@param options.headers The headers.
|
|
2093
|
-
@param options.getErrorMessage An optional function that converts an error to an error message.
|
|
2094
|
-
@param options.sendUsage Whether to send the usage information to the client. Defaults to true.
|
|
2095
|
-
@param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
|
|
2096
|
-
*/
|
|
2192
|
+
*Writes UI message stream output to a Node.js response-like object.
|
|
2193
|
+
*/
|
|
2097
2194
|
pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
|
|
2098
2195
|
/**
|
|
2099
|
-
|
|
2100
|
-
|
|
2101
|
-
|
|
2102
|
-
|
|
2103
|
-
|
|
2196
|
+
Writes text delta output to a Node.js response-like object.
|
|
2197
|
+
It sets a `Content-Type` header to `text/plain; charset=utf-8` and
|
|
2198
|
+
writes each text delta as a separate chunk.
|
|
2199
|
+
|
|
2200
|
+
@param response A Node.js response-like object (ServerResponse).
|
|
2201
|
+
@param init Optional headers, status code, and status text.
|
|
2104
2202
|
*/
|
|
2105
2203
|
pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
|
|
2106
2204
|
/**
|
|
2107
|
-
|
|
2205
|
+
Converts the result to a streamed response object with a stream data part stream.
|
|
2108
2206
|
|
|
2109
|
-
|
|
2110
|
-
@param options.statusText The status text.
|
|
2111
|
-
@param options.headers The headers.
|
|
2112
|
-
@param options.getErrorMessage An optional function that converts an error to an error message.
|
|
2113
|
-
@param options.sendUsage Whether to send the usage information to the client. Defaults to true.
|
|
2114
|
-
@param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
|
|
2115
|
-
@return A response object.
|
|
2207
|
+
@return A response object.
|
|
2116
2208
|
*/
|
|
2117
2209
|
toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
|
|
2118
2210
|
/**
|
|
@@ -2176,7 +2268,9 @@ type TextStreamPart<TOOLS extends ToolSet> = {
|
|
|
2176
2268
|
type: 'tool-result';
|
|
2177
2269
|
} & TypedToolResult<TOOLS>) | ({
|
|
2178
2270
|
type: 'tool-error';
|
|
2179
|
-
} & TypedToolError<TOOLS>) | {
|
|
2271
|
+
} & TypedToolError<TOOLS>) | ({
|
|
2272
|
+
type: 'tool-output-denied';
|
|
2273
|
+
} & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
|
|
2180
2274
|
type: 'start-step';
|
|
2181
2275
|
request: LanguageModelRequestMetadata;
|
|
2182
2276
|
warnings: CallWarning[];
|
|
@@ -2202,7 +2296,37 @@ type TextStreamPart<TOOLS extends ToolSet> = {
|
|
|
2202
2296
|
rawValue: unknown;
|
|
2203
2297
|
};
|
|
2204
2298
|
|
|
2299
|
+
/**
|
|
2300
|
+
Callback that is set using the `onFinish` option.
|
|
2301
|
+
|
|
2302
|
+
@param event - The event that is passed to the callback.
|
|
2303
|
+
*/
|
|
2304
|
+
type AgentOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
|
|
2305
|
+
/**
|
|
2306
|
+
Details for all steps.
|
|
2307
|
+
*/
|
|
2308
|
+
readonly steps: StepResult<TOOLS>[];
|
|
2309
|
+
/**
|
|
2310
|
+
Total usage for all steps. This is the sum of the usage of all steps.
|
|
2311
|
+
*/
|
|
2312
|
+
readonly totalUsage: LanguageModelUsage;
|
|
2313
|
+
}) => PromiseLike<void> | void;
|
|
2314
|
+
|
|
2315
|
+
/**
|
|
2316
|
+
Callback that is set using the `onStepFinish` option.
|
|
2317
|
+
|
|
2318
|
+
@param stepResult - The result of the step.
|
|
2319
|
+
*/
|
|
2320
|
+
type AgentOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
|
|
2321
|
+
|
|
2322
|
+
/**
|
|
2323
|
+
* Configuration options for an agent.
|
|
2324
|
+
*/
|
|
2205
2325
|
type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never> = CallSettings & {
|
|
2326
|
+
/**
|
|
2327
|
+
* The name of the agent.
|
|
2328
|
+
*/
|
|
2329
|
+
name?: string;
|
|
2206
2330
|
/**
|
|
2207
2331
|
* The system prompt to use.
|
|
2208
2332
|
*/
|
|
@@ -2223,7 +2347,7 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
|
|
|
2223
2347
|
Condition for stopping the generation when there are tool results in the last step.
|
|
2224
2348
|
When the condition is an array, any of the conditions can be met to stop the generation.
|
|
2225
2349
|
|
|
2226
|
-
@default stepCountIs(
|
|
2350
|
+
@default stepCountIs(20)
|
|
2227
2351
|
*/
|
|
2228
2352
|
stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
|
|
2229
2353
|
/**
|
|
@@ -2252,9 +2376,19 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
|
|
|
2252
2376
|
*/
|
|
2253
2377
|
experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
|
|
2254
2378
|
/**
|
|
2255
|
-
|
|
2256
|
-
|
|
2257
|
-
onStepFinish?:
|
|
2379
|
+
* Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
|
2380
|
+
*/
|
|
2381
|
+
onStepFinish?: AgentOnStepFinishCallback<NoInfer<TOOLS>>;
|
|
2382
|
+
/**
|
|
2383
|
+
* Callback that is called when all steps are finished and the response is complete.
|
|
2384
|
+
*/
|
|
2385
|
+
onFinish?: AgentOnFinishCallback<NoInfer<TOOLS>>;
|
|
2386
|
+
/**
|
|
2387
|
+
Additional provider-specific options. They are passed through
|
|
2388
|
+
to the provider from the AI SDK and enable provider-specific
|
|
2389
|
+
functionality that can be fully encapsulated in the provider.
|
|
2390
|
+
*/
|
|
2391
|
+
providerOptions?: ProviderOptions;
|
|
2258
2392
|
/**
|
|
2259
2393
|
* Context that is passed into tool calls.
|
|
2260
2394
|
*
|
|
@@ -2263,46 +2397,36 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
2263 2397 |   * @default undefined
2264 2398 |   */
2265 2399 |   experimental_context?: unknown;
2266      | - /**
2267      | - * Internal. For test use only. May change without notice.
2268      | - */
2269      | - _internal?: {
2270      | - generateId?: IdGenerator;
2271      | - currentDate?: () => Date;
2272      | - };
2273 2400 |   };
     2401 | +
     2402 | + /**
     2403 | + * The Agent class provides a structured way to encapsulate LLM configuration, tools,
     2404 | + * and behavior into reusable components.
     2405 | + *
     2406 | + * It handles the agent loop for you, allowing the LLM to call tools multiple times in
     2407 | + * sequence to accomplish complex tasks.
     2408 | + *
     2409 | + * Define agents once and use them across your application.
     2410 | + */
2274 2411 |   declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never> {
2275 2412 |   private readonly settings;
2276 2413 |   constructor(settings: AgentSettings<TOOLS, OUTPUT, OUTPUT_PARTIAL>);
     2414 | + /**
     2415 | + * The name of the agent.
     2416 | + */
     2417 | + get name(): string | undefined;
     2418 | + /**
     2419 | + * The tools that the agent can use.
     2420 | + */
2277 2421 |   get tools(): TOOLS;
2278      | -
2279      | -
2280      | - Additional provider-specific metadata. They are passed through
2281      | - from the provider to the AI SDK and enable provider-specific
2282      | - results that can be fully encapsulated in the provider.
     2422 | + /**
     2423 | + * Generates an output from the agent (non-streaming).
2283 2424 |   */
2284      | -
2285      | -
2286      | -
2287      | - to the provider from the AI SDK and enable provider-specific
2288      | - functionality that can be fully encapsulated in the provider.
2289      | - */
2290      | - providerOptions?: ProviderOptions;
2291      | - }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2292      | - stream(options: Prompt & {
2293      | - /**
2294      | - Additional provider-specific metadata. They are passed through
2295      | - from the provider to the AI SDK and enable provider-specific
2296      | - results that can be fully encapsulated in the provider.
     2425 | + generate(options: Prompt): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
     2426 | + /**
     2427 | + * Streams an output from the agent (streaming).
2297 2428 |   */
2298      | -
2299      | - /**
2300      | - Additional provider-specific metadata. They are passed through
2301      | - to the provider from the AI SDK and enable provider-specific
2302      | - functionality that can be fully encapsulated in the provider.
2303      | - */
2304      | - providerOptions?: ProviderOptions;
2305      | - }): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
     2429 | + stream(options: Prompt): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
2306 2430 |   /**
2307 2431 |   * Creates a response object that streams UI messages to the client.
2308 2432 |   */
@@ -2310,7 +2434,12 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
2310 2434 |   messages: UIMessage<never, never, InferUITools<TOOLS>>[];
2311 2435 |   }): Response;
2312 2436 |   }
     2437 | +
     2438 | + /**
     2439 | + * Infer the type of the tools of an agent.
     2440 | + */
2313 2441 |   type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, any, any> ? TOOLS : never;
     2442 | +
2314 2443 |   /**
2315 2444 |   * Infer the UI message type of an agent.
2316 2445 |   */
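The new `Agent` surface replaces the old per-call `providerOptions` overloads with plain `generate(options: Prompt)` / `stream(options: Prompt)` methods and moves `onStepFinish` / `onFinish` onto the settings. A minimal usage sketch, assuming the settings also accept `model`, `system`, and `tools` fields as in earlier betas, and that the callbacks receive the step result / final result as in `generateText` (the model id and tool are made up for illustration):

```ts
import { Agent, tool } from 'ai';
import { z } from 'zod';

const weatherAgent = new Agent({
  model: 'openai/gpt-4o', // assumption: a gateway-style model id string
  system: 'You answer weather questions.',
  tools: {
    getWeather: tool({
      description: 'Get the current weather for a city',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperatureC: 21 }),
    }),
  },
  // new in this version: step- and run-level callbacks on the agent settings
  onStepFinish: step => console.log('step finished, tool calls:', step.toolCalls.length),
  onFinish: result => console.log('agent finished:', result.text),
});

const result = await weatherAgent.generate({ prompt: 'How warm is it in Berlin?' });
console.log(result.text);

// streaming variant
const stream = weatherAgent.stream({ prompt: 'And in Paris?' });
for await (const textPart of stream.textStream) {
  process.stdout.write(textPart);
}
```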
@@ -2547,7 +2676,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
2547 2676 |   type: 'tool-input-end';
2548 2677 |   id: string;
2549 2678 |   providerMetadata?: ProviderMetadata;
2550      | - } | ({
     2679 | + } | ToolApprovalRequestOutput<TOOLS> | ({
2551 2680 |   type: 'source';
2552 2681 |   } & Source) | {
2553 2682 |   type: 'file';
@@ -2563,7 +2692,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
2563 2692 |   file: GeneratedFile;
2564 2693 |   } | {
2565 2694 |   type: 'stream-start';
2566      | - warnings:
     2695 | + warnings: LanguageModelV3CallWarning[];
2567 2696 |   } | {
2568 2697 |   type: 'response-metadata';
2569 2698 |   id?: string;
@@ -2694,6 +2823,16 @@ declare class NoOutputSpecifiedError extends AISDKError {
2694 2823 |   static isInstance(error: unknown): error is NoOutputSpecifiedError;
2695 2824 |   }
2696 2825 |
     2826 | + /**
     2827 | + Error that is thrown when no speech audio was generated.
     2828 | + */
     2829 | + declare class NoSpeechGeneratedError extends AISDKError {
     2830 | + readonly responses: Array<SpeechModelResponseMetadata>;
     2831 | + constructor(options: {
     2832 | + responses: Array<SpeechModelResponseMetadata>;
     2833 | + });
     2834 | + }
     2835 | +
2697 2836 |   declare const symbol$6: unique symbol;
2698 2837 |   declare class ToolCallRepairError extends AISDKError {
2699 2838 |   private readonly [symbol$6];
@@ -2834,7 +2973,7 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
2834 2973 |   /**
2835 2974 |   The image model to use.
2836 2975 |   */
2837      | - model:
     2976 | + model: ImageModelV3;
2838 2977 |   /**
2839 2978 |   The prompt that should be used to generate the image.
2840 2979 |   */
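`generateImage` now types `model` as `ImageModelV3`. A usage sketch, assuming the OpenAI image model from `@ai-sdk/openai` (any `ImageModelV3` works here):

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { image } = await generateImage({
  model: openai.image('dall-e-3'), // assumption: the provider exposes an ImageModelV3 here
  prompt: 'A watercolor lighthouse at dusk',
  size: '1024x1024',
});

// the generated file exposes the result as base64 and as a Uint8Array
console.log(image.uint8Array.byteLength, 'bytes of image data');
```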
@@ -3500,7 +3639,7 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
3500 3639 |   /**
3501 3640 |   The speech model to use.
3502 3641 |   */
3503      | - model:
     3642 | + model: SpeechModelV3;
3504 3643 |   /**
3505 3644 |   The text to convert to speech.
3506 3645 |   */
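`generateSpeech` now types `model` as `SpeechModelV3`, and the new `NoSpeechGeneratedError` declared earlier in this diff can be used to detect empty results. A sketch, assuming the OpenAI speech model:

```ts
import { experimental_generateSpeech as generateSpeech, NoSpeechGeneratedError } from 'ai';
import { openai } from '@ai-sdk/openai';

try {
  const { audio } = await generateSpeech({
    model: openai.speech('tts-1'), // assumption: the provider exposes a SpeechModelV3 here
    text: 'Hello from the AI SDK.',
    voice: 'alloy',
  });
  console.log('generated', audio.uint8Array.byteLength, 'bytes of audio');
} catch (error) {
  if (error instanceof NoSpeechGeneratedError) {
    console.error('no audio was generated', error.responses);
  } else {
    throw error;
  }
}
```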
@@ -3556,7 +3695,7 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
3556 3695 |   headers?: Record<string, string>;
3557 3696 |   }): Promise<SpeechResult>;
3558 3697 |
3559      | - type Warning =
     3698 | + type Warning = LanguageModelV3CallWarning | ImageModelV3CallWarning | SpeechModelV3CallWarning | TranscriptionModelV3CallWarning;
3560 3699 |   type LogWarningsFunction = (warnings: Warning[]) => void;
3561 3700 |
3562 3701 |   /**
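The `Warning` union now covers language, image, speech, and transcription model call warnings. A sketch of installing a custom warning logger via the global hook declared at the end of this file (setting it to `false` disables warning logging):

```ts
import type { Experimental_LogWarningsFunction } from 'ai';

const logWarnings: Experimental_LogWarningsFunction = warnings => {
  for (const warning of warnings) {
    // assumption: each warning variant carries a `type` discriminator
    console.warn('[ai-sdk warning]', warning.type, warning);
  }
};

globalThis.AI_SDK_LOG_WARNINGS = logWarnings;
```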
@@ -3564,19 +3703,19 @@ type LogWarningsFunction = (warnings: Warning[]) => void;
3564 3703 |   */
3565 3704 |   declare function defaultSettingsMiddleware({ settings, }: {
3566 3705 |   settings: Partial<{
3567      | - maxOutputTokens?:
3568      | - temperature?:
3569      | - stopSequences?:
3570      | - topP?:
3571      | - topK?:
3572      | - presencePenalty?:
3573      | - frequencyPenalty?:
3574      | - responseFormat?:
3575      | - seed?:
3576      | - tools?:
3577      | - toolChoice?:
3578      | - headers?:
3579      | - providerOptions?:
     3706 | + maxOutputTokens?: LanguageModelV3CallOptions['maxOutputTokens'];
     3707 | + temperature?: LanguageModelV3CallOptions['temperature'];
     3708 | + stopSequences?: LanguageModelV3CallOptions['stopSequences'];
     3709 | + topP?: LanguageModelV3CallOptions['topP'];
     3710 | + topK?: LanguageModelV3CallOptions['topK'];
     3711 | + presencePenalty?: LanguageModelV3CallOptions['presencePenalty'];
     3712 | + frequencyPenalty?: LanguageModelV3CallOptions['frequencyPenalty'];
     3713 | + responseFormat?: LanguageModelV3CallOptions['responseFormat'];
     3714 | + seed?: LanguageModelV3CallOptions['seed'];
     3715 | + tools?: LanguageModelV3CallOptions['tools'];
     3716 | + toolChoice?: LanguageModelV3CallOptions['toolChoice'];
     3717 | + headers?: LanguageModelV3CallOptions['headers'];
     3718 | + providerOptions?: LanguageModelV3CallOptions['providerOptions'];
3580 3719 |   }>;
3581 3720 |   }): LanguageModelMiddleware;
3582 3721 |
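The `defaultSettingsMiddleware` settings are now typed via `LanguageModelV3CallOptions` instead of loose fields. A sketch combining it with `wrapLanguageModel`, assuming an OpenAI model that implements `LanguageModelV3`:

```ts
import { defaultSettingsMiddleware, wrapLanguageModel } from 'ai';
import { openai } from '@ai-sdk/openai';

// apply default call settings to every request made through this model
const model = wrapLanguageModel({
  model: openai('gpt-4o'), // assumption: a LanguageModelV3-compatible model
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.3,
      maxOutputTokens: 1024,
      stopSequences: ['\n\nHuman:'],
    },
  }),
});
```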
@@ -3600,39 +3739,39 @@ declare function extractReasoningMiddleware({ tagName, separator, startWithReaso
3600 3739 |   declare function simulateStreamingMiddleware(): LanguageModelMiddleware;
3601 3740 |
3602 3741 |   /**
3603      | - * Wraps a
     3742 | + * Wraps a LanguageModelV3 instance with middleware functionality.
3604 3743 |   * This function allows you to apply middleware to transform parameters,
3605 3744 |   * wrap generate operations, and wrap stream operations of a language model.
3606 3745 |   *
3607 3746 |   * @param options - Configuration options for wrapping the language model.
3608      | - * @param options.model - The original
     3747 | + * @param options.model - The original LanguageModelV3 instance to be wrapped.
3609 3748 |   * @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
3610 3749 |   * @param options.modelId - Optional custom model ID to override the original model's ID.
3611 3750 |   * @param options.providerId - Optional custom provider ID to override the original model's provider ID.
3612      | - * @returns A new
     3751 | + * @returns A new LanguageModelV3 instance with middleware applied.
3613 3752 |   */
3614 3753 |   declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
3615      | - model:
     3754 | + model: LanguageModelV3;
3616 3755 |   middleware: LanguageModelMiddleware | LanguageModelMiddleware[];
3617 3756 |   modelId?: string;
3618 3757 |   providerId?: string;
3619      | - }) =>
     3758 | + }) => LanguageModelV3;
3620 3759 |
3621 3760 |   /**
3622      | - * Wraps a
     3761 | + * Wraps a ProviderV3 instance with middleware functionality.
3623 3762 |   * This function allows you to apply middleware to all language models
3624 3763 |   * from the provider, enabling you to transform parameters, wrap generate
3625 3764 |   * operations, and wrap stream operations for every language model.
3626 3765 |   *
3627 3766 |   * @param options - Configuration options for wrapping the provider.
3628      | - * @param options.provider - The original
     3767 | + * @param options.provider - The original ProviderV3 instance to be wrapped.
3629 3768 |   * @param options.languageModelMiddleware - The middleware to be applied to all language models from the provider. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
3630      | - * @returns A new
     3769 | + * @returns A new ProviderV3 instance with middleware applied to all language models.
3631 3770 |   */
3632 3771 |   declare function wrapProvider({ provider, languageModelMiddleware, }: {
3633      | - provider: ProviderV2;
     3772 | + provider: ProviderV3 | ProviderV2;
3634 3773 |   languageModelMiddleware: LanguageModelMiddleware | LanguageModelMiddleware[];
3635      | - }):
     3774 | + }): ProviderV3;
3636 3775 |
3637 3776 |   /**
3638 3777 |   * Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
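`wrapLanguageModel` and `wrapProvider` now work in terms of `LanguageModelV3` / `ProviderV3`. A sketch that wraps every language model of a provider with the built-in `extractReasoningMiddleware` (the provider choice is an assumption):

```ts
import { extractReasoningMiddleware, wrapProvider } from 'ai';
import { openai } from '@ai-sdk/openai';

const reasoningProvider = wrapProvider({
  provider: openai, // accepts a ProviderV3 (or a legacy ProviderV2)
  languageModelMiddleware: extractReasoningMiddleware({ tagName: 'think' }),
});

// every language model resolved through the wrapped provider gets the middleware
const model = reasoningProvider.languageModel('gpt-4o');
```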
@@ -3648,19 +3787,19 @@ declare function wrapProvider({ provider, languageModelMiddleware, }: {
3648 3787 |   *
3649 3788 |   * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
3650 3789 |   */
3651      | - declare function customProvider<LANGUAGE_MODELS extends Record<string,
     3790 | + declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV3>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3<string>>, IMAGE_MODELS extends Record<string, ImageModelV3>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV3>, SPEECH_MODELS extends Record<string, SpeechModelV3>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
3652 3791 |   languageModels?: LANGUAGE_MODELS;
3653 3792 |   textEmbeddingModels?: EMBEDDING_MODELS;
3654 3793 |   imageModels?: IMAGE_MODELS;
3655 3794 |   transcriptionModels?: TRANSCRIPTION_MODELS;
3656 3795 |   speechModels?: SPEECH_MODELS;
3657      | - fallbackProvider?: ProviderV2;
3658      | - }):
3659      | - languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>):
     3796 | + fallbackProvider?: ProviderV3 | ProviderV2;
     3797 | + }): ProviderV3 & {
     3798 | + languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV3;
3660 3799 |   textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3<string>;
3661      | - imageModel(modelId: ExtractModelId<IMAGE_MODELS>):
3662      | - transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>):
3663      | - speechModel(modelId: ExtractModelId<SPEECH_MODELS>):
     3800 | + imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV3;
     3801 | + transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV3;
     3802 | + speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV3;
3664 3803 |   };
3665 3804 |   /**
3666 3805 |   * @deprecated Use `customProvider` instead.
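`customProvider` is now generic over the V3 model interfaces and returns a `ProviderV3`. A sketch mapping custom aliases onto concrete models (the provider and model ids are assumptions):

```ts
import { customProvider } from 'ai';
import { openai } from '@ai-sdk/openai';

export const myProvider = customProvider({
  languageModels: {
    fast: openai('gpt-4o-mini'),
    smart: openai('gpt-4o'),
  },
  textEmbeddingModels: {
    default: openai.textEmbeddingModel('text-embedding-3-small'),
  },
  // unknown model ids fall through to the fallback provider
  fallbackProvider: openai,
});

const model = myProvider.languageModel('fast');
```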
@@ -3684,17 +3823,17 @@ declare class NoSuchProviderError extends NoSuchModelError {
3684 3823 |   }
3685 3824 |
3686 3825 |   type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
3687      | - interface ProviderRegistryProvider<PROVIDERS extends Record<string,
3688      | - languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never):
3689      | - languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
     3826 | + interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV3> = Record<string, ProviderV3>, SEPARATOR extends string = ':'> {
     3827 | + languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV3;
     3828 | + languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV3;
3690 3829 |   textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV3<string>;
3691 3830 |   textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3<string>;
3692      | - imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never):
3693      | - imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
3694      | - transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never):
3695      | - transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
3696      | - speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never):
3697      | - speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
     3831 | + imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV3;
     3832 | + imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV3;
     3833 | + transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV3;
     3834 | + transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV3;
     3835 | + speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV3;
     3836 | + speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV3;
3698 3837 |   }
3699 3838 |   /**
3700 3839 |   * Creates a registry for the given providers with optional middleware functionality.
@@ -3708,7 +3847,7 @@ interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2>
3708 3847 |   * @param options.languageModelMiddleware - Optional middleware to be applied to all language models from the registry. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
3709 3848 |   * @returns A new ProviderRegistryProvider instance that provides access to all registered providers with optional middleware applied to language models.
3710 3849 |   */
3711      | - declare function createProviderRegistry<PROVIDERS extends Record<string,
     3850 | + declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV3>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, languageModelMiddleware, }?: {
3712 3851 |   separator?: SEPARATOR;
3713 3852 |   languageModelMiddleware?: LanguageModelMiddleware | LanguageModelMiddleware[];
3714 3853 |   }): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
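`createProviderRegistry` now expects `ProviderV3` providers and still supports a custom separator plus optional language-model middleware. A sketch, with the concrete providers as assumptions:

```ts
import { createProviderRegistry, defaultSettingsMiddleware } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';

const registry = createProviderRegistry(
  { openai, anthropic },
  {
    // applied to every language model resolved through the registry
    languageModelMiddleware: defaultSettingsMiddleware({
      settings: { temperature: 0 },
    }),
  },
);

const model = registry.languageModel('openai:gpt-4o'); // '<provider>:<model-id>'
```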
@@ -3958,7 +4097,7 @@ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetr
3958 4097 |   /**
3959 4098 |   The transcription model to use.
3960 4099 |   */
3961      | - model:
     4100 | + model: TranscriptionModelV3;
3962 4101 |   /**
3963 4102 |   The audio data to transcribe.
3964 4103 |   */
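`transcribe` now types `model` as `TranscriptionModelV3`. A sketch, assuming the OpenAI transcription model and a local audio file:

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'), // assumption: a TranscriptionModelV3
  audio: await readFile('./recording.mp3'),
});

console.log(text);
```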
@@ -4179,6 +4318,20 @@ type ChatRequestOptions = {
4179 4318 |   body?: object;
4180 4319 |   metadata?: unknown;
4181 4320 |   };
     4321 | + /**
     4322 | + * Function that can be called to add a tool approval response to the chat.
     4323 | + */
     4324 | + type ChatAddToolApproveResponseFunction = ({ id, approved, reason, }: {
     4325 | + id: string;
     4326 | + /**
     4327 | + * Flag indicating whether the approval was granted or denied.
     4328 | + */
     4329 | + approved: boolean;
     4330 | + /**
     4331 | + * Optional reason for the approval or denial.
     4332 | + */
     4333 | + reason?: string;
     4334 | + }) => void | PromiseLike<void>;
4182 4335 |   type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
4183 4336 |   interface ChatState<UI_MESSAGE extends UIMessage> {
4184 4337 |   status: ChatStatus;
@@ -4327,10 +4480,19 @@ declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
4327 4480 |   * Clear the error state and set the status to ready if the chat is in an error state.
4328 4481 |   */
4329 4482 |   clearError: () => void;
4330      | -
     4483 | + addToolApprovalResponse: ChatAddToolApproveResponseFunction;
     4484 | + addToolResult: <TOOL extends keyof InferUIMessageTools<UI_MESSAGE>>({ state, tool, toolCallId, output, errorText, }: {
     4485 | + state?: "output-available";
4331 4486 |   tool: TOOL;
4332 4487 |   toolCallId: string;
4333 4488 |   output: InferUIMessageTools<UI_MESSAGE>[TOOL]["output"];
     4489 | + errorText?: never;
     4490 | + } | {
     4491 | + state: "output-error";
     4492 | + tool: TOOL;
     4493 | + toolCallId: string;
     4494 | + output?: never;
     4495 | + errorText: string;
4334 4496 |   }) => Promise<void>;
4335 4497 |   /**
4336 4498 |   * Abort the current request immediately, keep the generated tokens if any.
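`AbstractChat` gains `addToolApprovalResponse` and an `addToolResult` overload that can report an error state. A client-side sketch; the approval id, tool call id, and tool name are placeholders that would come from the tool parts of the last assistant message in a real app:

```ts
import type { AbstractChat, UIMessage } from 'ai';

// grant a pending tool approval request
async function approveToolCall(chat: AbstractChat<UIMessage>, approvalId: string) {
  await chat.addToolApprovalResponse({
    id: approvalId,
    approved: true,
    reason: 'Confirmed by the user',
  });
}

// report that a tool execution failed on the client
async function reportToolFailure(chat: AbstractChat<UIMessage>, toolCallId: string) {
  await chat.addToolResult({
    state: 'output-error',
    tool: 'getWeather', // placeholder tool name
    toolCallId,
    errorText: 'Weather service unavailable',
  });
}
```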
@@ -4475,6 +4637,15 @@ declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> extends HttpCha
4475 4637 |   protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
4476 4638 |   }
4477 4639 |
     4640 | + /**
     4641 | + Check if the last message is an assistant message with completed tool call approvals.
     4642 | + The last step of the message must have at least one tool approval response and
     4643 | + all tool approvals must have a response.
     4644 | + */
     4645 | + declare function lastAssistantMessageIsCompleteWithApprovalResponses({ messages, }: {
     4646 | + messages: UIMessage[];
     4647 | + }): boolean;
     4648 | +
4478 4649 |   /**
4479 4650 |   Check if the message is an assistant message with completed tool calls.
4480 4651 |   The last step of the message must have at least one tool invocation and
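The new `lastAssistantMessageIsCompleteWithApprovalResponses` helper mirrors `lastAssistantMessageIsCompleteWithToolCalls` and is designed to be plugged into automatic resubmission. A sketch, assuming the `useChat` hook from `@ai-sdk/react` and its `sendAutomaticallyWhen` option (both outside this diff):

```ts
import { lastAssistantMessageIsCompleteWithApprovalResponses } from 'ai';
import { useChat } from '@ai-sdk/react';

export function useApprovalChat() {
  // resubmit the conversation once every tool approval request has been answered
  return useChat({
    sendAutomaticallyWhen: lastAssistantMessageIsCompleteWithApprovalResponses,
  });
}
```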
@@ -4610,7 +4781,7 @@ declare global {
4610 4781 |   *
4611 4782 |   * @see https://ai-sdk.dev/docs/ai-sdk-core/provider-management#global-provider-configuration
4612 4783 |   */
4613      | - var AI_SDK_DEFAULT_PROVIDER:
     4784 | + var AI_SDK_DEFAULT_PROVIDER: ProviderV3 | undefined;
4614 4785 |   /**
4615 4786 |   * The warning logger to use for the AI SDK.
4616 4787 |   *
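The global default provider is now typed as `ProviderV3 | undefined`. A sketch of overriding it at startup so that plain model id strings resolve against your own provider (the provider choice is an assumption):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// set once during application startup
globalThis.AI_SDK_DEFAULT_PROVIDER = openai;

const { text } = await generateText({
  model: 'gpt-4o', // resolved through the global default provider
  prompt: 'Say hello.',
});
```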
@@ -4621,4 +4792,4 @@ declare global {
4621 4792 |   var AI_SDK_LOG_WARNINGS: LogWarningsFunction | undefined | false;
4622 4793 |   }
4623 4794 |
4624      | - export { AbstractChat, AsyncIterableStream, CallSettings, CallWarning, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, MCPClient as experimental_MCPClient, MCPClientConfig as experimental_MCPClientConfig, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDeepEqualData, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, readUIMessageStream, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapLanguageModel, wrapProvider };
     4795 | + export { AbstractChat, Agent, AgentOnFinishCallback, AgentOnStepFinishCallback, AgentSettings, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferAgentUIMessage, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoOutputSpecifiedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, MCPClient as experimental_MCPClient, MCPClientConfig as experimental_MCPClientConfig, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDeepEqualData, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, readUIMessageStream, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapLanguageModel, wrapProvider };