@ai-sdk/provider 0.0.0-4115c213-20260122152721
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +961 -0
- package/LICENSE +13 -0
- package/README.md +1 -0
- package/dist/index.d.mts +3796 -0
- package/dist/index.d.ts +3796 -0
- package/dist/index.js +421 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +377 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +63 -0
- package/src/embedding-model/index.ts +2 -0
- package/src/embedding-model/v2/embedding-model-v2-embedding.ts +5 -0
- package/src/embedding-model/v2/embedding-model-v2.ts +113 -0
- package/src/embedding-model/v2/index.ts +2 -0
- package/src/embedding-model/v3/embedding-model-v3-call-options.ts +26 -0
- package/src/embedding-model/v3/embedding-model-v3-embedding.ts +5 -0
- package/src/embedding-model/v3/embedding-model-v3-result.ts +48 -0
- package/src/embedding-model/v3/embedding-model-v3.ts +54 -0
- package/src/embedding-model/v3/index.ts +4 -0
- package/src/embedding-model-middleware/index.ts +1 -0
- package/src/embedding-model-middleware/v3/embedding-model-v3-middleware.ts +69 -0
- package/src/embedding-model-middleware/v3/index.ts +1 -0
- package/src/errors/ai-sdk-error.ts +62 -0
- package/src/errors/api-call-error.ts +59 -0
- package/src/errors/empty-response-body-error.ts +17 -0
- package/src/errors/get-error-message.ts +15 -0
- package/src/errors/index.ts +15 -0
- package/src/errors/invalid-argument-error.ts +32 -0
- package/src/errors/invalid-prompt-error.ts +33 -0
- package/src/errors/invalid-response-data-error.ts +31 -0
- package/src/errors/json-parse-error.ts +29 -0
- package/src/errors/load-api-key-error.ts +17 -0
- package/src/errors/load-setting-error.ts +17 -0
- package/src/errors/no-content-generated-error.ts +22 -0
- package/src/errors/no-such-model-error.ts +45 -0
- package/src/errors/too-many-embedding-values-for-call-error.ts +40 -0
- package/src/errors/type-validation-error.ts +51 -0
- package/src/errors/unsupported-functionality-error.ts +26 -0
- package/src/image-model/index.ts +2 -0
- package/src/image-model/v2/image-model-v2-call-options.ts +60 -0
- package/src/image-model/v2/image-model-v2-call-warning.ts +16 -0
- package/src/image-model/v2/image-model-v2.ts +104 -0
- package/src/image-model/v2/index.ts +6 -0
- package/src/image-model/v3/image-model-v3-call-options.ts +74 -0
- package/src/image-model/v3/image-model-v3-file.ts +44 -0
- package/src/image-model/v3/image-model-v3-usage.ts +19 -0
- package/src/image-model/v3/image-model-v3.ts +110 -0
- package/src/image-model/v3/index.ts +7 -0
- package/src/image-model-middleware/index.ts +1 -0
- package/src/image-model-middleware/v3/image-model-v3-middleware.ts +61 -0
- package/src/image-model-middleware/v3/index.ts +1 -0
- package/src/index.ts +15 -0
- package/src/json-value/index.ts +2 -0
- package/src/json-value/is-json.ts +40 -0
- package/src/json-value/json-value.ts +17 -0
- package/src/language-model/index.ts +2 -0
- package/src/language-model/v2/index.ts +18 -0
- package/src/language-model/v2/language-model-v2-call-options.ts +127 -0
- package/src/language-model/v2/language-model-v2-call-warning.ts +23 -0
- package/src/language-model/v2/language-model-v2-content.ts +14 -0
- package/src/language-model/v2/language-model-v2-data-content.ts +4 -0
- package/src/language-model/v2/language-model-v2-file.ts +25 -0
- package/src/language-model/v2/language-model-v2-finish-reason.ts +20 -0
- package/src/language-model/v2/language-model-v2-function-tool.ts +37 -0
- package/src/language-model/v2/language-model-v2-prompt.ts +218 -0
- package/src/language-model/v2/language-model-v2-provider-defined-tool.ts +24 -0
- package/src/language-model/v2/language-model-v2-reasoning.ts +14 -0
- package/src/language-model/v2/language-model-v2-response-metadata.ts +16 -0
- package/src/language-model/v2/language-model-v2-source.ts +67 -0
- package/src/language-model/v2/language-model-v2-stream-part.ts +102 -0
- package/src/language-model/v2/language-model-v2-text.ts +15 -0
- package/src/language-model/v2/language-model-v2-tool-call.ts +35 -0
- package/src/language-model/v2/language-model-v2-tool-choice.ts +5 -0
- package/src/language-model/v2/language-model-v2-tool-result.ts +40 -0
- package/src/language-model/v2/language-model-v2-usage.ts +34 -0
- package/src/language-model/v2/language-model-v2.ts +137 -0
- package/src/language-model/v3/index.ts +21 -0
- package/src/language-model/v3/language-model-v3-call-options.ts +125 -0
- package/src/language-model/v3/language-model-v3-content.ts +16 -0
- package/src/language-model/v3/language-model-v3-data-content.ts +4 -0
- package/src/language-model/v3/language-model-v3-file.ts +32 -0
- package/src/language-model/v3/language-model-v3-finish-reason.ts +33 -0
- package/src/language-model/v3/language-model-v3-function-tool.ts +53 -0
- package/src/language-model/v3/language-model-v3-generate-result.ts +63 -0
- package/src/language-model/v3/language-model-v3-prompt.ts +422 -0
- package/src/language-model/v3/language-model-v3-provider-tool.ts +28 -0
- package/src/language-model/v3/language-model-v3-reasoning.ts +14 -0
- package/src/language-model/v3/language-model-v3-response-metadata.ts +16 -0
- package/src/language-model/v3/language-model-v3-source.ts +67 -0
- package/src/language-model/v3/language-model-v3-stream-part.ts +106 -0
- package/src/language-model/v3/language-model-v3-stream-result.ts +32 -0
- package/src/language-model/v3/language-model-v3-text.ts +15 -0
- package/src/language-model/v3/language-model-v3-tool-approval-request.ts +27 -0
- package/src/language-model/v3/language-model-v3-tool-call.ts +41 -0
- package/src/language-model/v3/language-model-v3-tool-choice.ts +5 -0
- package/src/language-model/v3/language-model-v3-tool-result.ts +51 -0
- package/src/language-model/v3/language-model-v3-usage.ts +59 -0
- package/src/language-model/v3/language-model-v3.ts +61 -0
- package/src/language-model-middleware/index.ts +2 -0
- package/src/language-model-middleware/v2/index.ts +1 -0
- package/src/language-model-middleware/v2/language-model-v2-middleware.ts +82 -0
- package/src/language-model-middleware/v3/index.ts +1 -0
- package/src/language-model-middleware/v3/language-model-v3-middleware.ts +84 -0
- package/src/provider/index.ts +2 -0
- package/src/provider/v2/index.ts +1 -0
- package/src/provider/v2/provider-v2.ts +64 -0
- package/src/provider/v3/index.ts +1 -0
- package/src/provider/v3/provider-v3.ts +93 -0
- package/src/reranking-model/index.ts +1 -0
- package/src/reranking-model/v3/index.ts +2 -0
- package/src/reranking-model/v3/reranking-model-v3-call-options.ts +40 -0
- package/src/reranking-model/v3/reranking-model-v3.ts +90 -0
- package/src/shared/index.ts +2 -0
- package/src/shared/v2/index.ts +3 -0
- package/src/shared/v2/shared-v2-headers.ts +1 -0
- package/src/shared/v2/shared-v2-provider-metadata.ts +27 -0
- package/src/shared/v2/shared-v2-provider-options.ts +24 -0
- package/src/shared/v3/index.ts +4 -0
- package/src/shared/v3/shared-v3-headers.ts +1 -0
- package/src/shared/v3/shared-v3-provider-metadata.ts +24 -0
- package/src/shared/v3/shared-v3-provider-options.ts +24 -0
- package/src/shared/v3/shared-v3-warning.ts +50 -0
- package/src/speech-model/index.ts +2 -0
- package/src/speech-model/v2/index.ts +3 -0
- package/src/speech-model/v2/speech-model-v2-call-options.ts +62 -0
- package/src/speech-model/v2/speech-model-v2-call-warning.ts +16 -0
- package/src/speech-model/v2/speech-model-v2.ts +89 -0
- package/src/speech-model/v3/index.ts +2 -0
- package/src/speech-model/v3/speech-model-v3-call-options.ts +62 -0
- package/src/speech-model/v3/speech-model-v3.ts +89 -0
- package/src/transcription-model/index.ts +2 -0
- package/src/transcription-model/v2/index.ts +3 -0
- package/src/transcription-model/v2/transcription-model-v2-call-options.ts +48 -0
- package/src/transcription-model/v2/transcription-model-v2-call-warning.ts +16 -0
- package/src/transcription-model/v2/transcription-model-v2.ts +117 -0
- package/src/transcription-model/v3/index.ts +2 -0
- package/src/transcription-model/v3/transcription-model-v3-call-options.ts +45 -0
- package/src/transcription-model/v3/transcription-model-v3.ts +117 -0
package/dist/index.d.mts
ADDED
@@ -0,0 +1,3796 @@
import { JSONSchema7 } from 'json-schema';
export { JSONSchema7, JSONSchema7Definition } from 'json-schema';

type SharedV3Headers = Record<string, string>;

/**
A JSON value can be a string, number, boolean, object, array, or null.
JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
*/
type JSONValue = null | string | number | boolean | JSONObject | JSONArray;
type JSONObject = {
  [key: string]: JSONValue | undefined;
};
type JSONArray = JSONValue[];

/**
 * Additional provider-specific metadata.
 * Metadata are additional outputs from the provider.
 * They are passed through to the provider from the AI SDK
 * and enable provider-specific functionality
 * that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
type SharedV3ProviderMetadata = Record<string, JSONObject>;

/**
 * Additional provider-specific options.
 * Options are additional input to the provider.
 * They are passed through to the provider from the AI SDK
 * and enable provider-specific functionality
 * that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
type SharedV3ProviderOptions = Record<string, JSONObject>;

/**
 * Warning from the model.
 *
 * For example, that certain features are unsupported or compatibility
 * functionality is used (which might lead to suboptimal results).
 */
type SharedV3Warning = {
  /**
   * A feature is not supported by the model.
   */
  type: 'unsupported';
  /**
   * The feature that is not supported.
   */
  feature: string;
  /**
   * Additional details about the warning.
   */
  details?: string;
} | {
  /**
   * A compatibility feature is used that might lead to suboptimal results.
   */
  type: 'compatibility';
  /**
   * The feature that is used in a compatibility mode.
   */
  feature: string;
  /**
   * Additional details about the warning.
   */
  details?: string;
} | {
  /**
   * Other warning.
   */
  type: 'other';
  /**
   * The message of the warning.
   */
  message: string;
};
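// Illustrative sketch (not from the published file): example values for the shared
// v3 types above, assuming they are exported from the package entry point. The
// provider key and warning details are hypothetical.
import type { SharedV3ProviderOptions, SharedV3Warning } from '@ai-sdk/provider';

const exampleProviderOptions: SharedV3ProviderOptions = {
  anthropic: { cacheControl: { type: 'ephemeral' } },
};

const exampleWarning: SharedV3Warning = {
  type: 'unsupported',
  feature: 'seed',
  details: 'This model ignores the seed setting.',
};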

type SharedV2Headers = Record<string, string>;

/**
 * Additional provider-specific metadata.
 * Metadata are additional outputs from the provider.
 * They are passed through to the provider from the AI SDK
 * and enable provider-specific functionality
 * that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
type SharedV2ProviderMetadata = Record<string, Record<string, JSONValue>>;

/**
 * Additional provider-specific options.
 * Options are additional input to the provider.
 * They are passed through to the provider from the AI SDK
 * and enable provider-specific functionality
 * that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
type SharedV2ProviderOptions = Record<string, Record<string, JSONValue>>;

type EmbeddingModelV3CallOptions = {
  /**
  List of text values to generate embeddings for.
  */
  values: Array<string>;
  /**
  Abort signal for cancelling the operation.
  */
  abortSignal?: AbortSignal;
  /**
  Additional provider-specific options. They are passed through
  to the provider from the AI SDK and enable provider-specific
  functionality that can be fully encapsulated in the provider.
  */
  providerOptions?: SharedV3ProviderOptions;
  /**
  Additional HTTP headers to be sent with the request.
  Only applicable for HTTP-based providers.
  */
  headers?: SharedV3Headers;
};

/**
An embedding is a vector, i.e. an array of numbers.
It is e.g. used to represent a text as a vector of word embeddings.
*/
type EmbeddingModelV3Embedding = Array<number>;

/**
 * The result of a embedding model doEmbed call.
 */
type EmbeddingModelV3Result = {
  /**
   * Generated embeddings. They are in the same order as the input values.
   */
  embeddings: Array<EmbeddingModelV3Embedding>;
  /**
   * Token usage. We only have input tokens for embeddings.
   */
  usage?: {
    tokens: number;
  };
  /**
   * Additional provider-specific metadata. They are passed through
   * from the provider to the AI SDK and enable provider-specific
   * results that can be fully encapsulated in the provider.
   */
  providerMetadata?: SharedV3ProviderMetadata;
  /**
   * Optional response information for debugging purposes.
   */
  response?: {
    /**
     * Response headers.
     */
    headers?: SharedV3Headers;
    /**
    The response body.
    */
    body?: unknown;
  };
  /**
   * Warnings for the call, e.g. unsupported settings.
   */
  warnings: Array<SharedV3Warning>;
};

/**
Specification for an embedding model that implements the embedding model
interface version 3.

It is specific to text embeddings.
*/
type EmbeddingModelV3 = {
  /**
  The embedding model must specify which embedding model interface
  version it implements. This will allow us to evolve the embedding
  model interface and retain backwards compatibility. The different
  implementation versions can be handled as a discriminated union
  on our side.
  */
  readonly specificationVersion: 'v3';
  /**
  Name of the provider for logging purposes.
  */
  readonly provider: string;
  /**
  Provider-specific model ID for logging purposes.
  */
  readonly modelId: string;
  /**
  Limit of how many embeddings can be generated in a single API call.

  Use Infinity for models that do not have a limit.
  */
  readonly maxEmbeddingsPerCall: PromiseLike<number | undefined> | number | undefined;
  /**
  True if the model can handle multiple embedding calls in parallel.
  */
  readonly supportsParallelCalls: PromiseLike<boolean> | boolean;
  /**
  Generates a list of embeddings for the given input text.

  Naming: "do" prefix to prevent accidental direct usage of the method
  by the user.
  */
  doEmbed(options: EmbeddingModelV3CallOptions): PromiseLike<EmbeddingModelV3Result>;
};
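// Illustrative sketch (not from the published file): a minimal EmbeddingModelV3
// implementation, assuming the V3 types are exported from the package entry point.
// The provider name, model ID, and constant embeddings are placeholders.
import type {
  EmbeddingModelV3,
  EmbeddingModelV3CallOptions,
  EmbeddingModelV3Result,
} from '@ai-sdk/provider';

const exampleEmbeddingModel: EmbeddingModelV3 = {
  specificationVersion: 'v3',
  provider: 'example',
  modelId: 'example-embedding-1',
  maxEmbeddingsPerCall: 1024,
  supportsParallelCalls: true,
  async doEmbed(options: EmbeddingModelV3CallOptions): Promise<EmbeddingModelV3Result> {
    // A real provider would call its embeddings API here.
    const embeddings = options.values.map(() => [0, 0, 0]);
    return {
      embeddings,
      usage: { tokens: options.values.length },
      warnings: [],
    };
  },
};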

/**
An embedding is a vector, i.e. an array of numbers.
It is e.g. used to represent a text as a vector of word embeddings.
*/
type EmbeddingModelV2Embedding = Array<number>;

/**
Specification for an embedding model that implements the embedding model
interface version 2.

VALUE is the type of the values that the model can embed.
This will allow us to go beyond text embeddings in the future,
e.g. to support image embeddings
*/
type EmbeddingModelV2<VALUE> = {
  /**
  The embedding model must specify which embedding model interface
  version it implements. This will allow us to evolve the embedding
  model interface and retain backwards compatibility. The different
  implementation versions can be handled as a discriminated union
  on our side.
  */
  readonly specificationVersion: 'v2';
  /**
  Name of the provider for logging purposes.
  */
  readonly provider: string;
  /**
  Provider-specific model ID for logging purposes.
  */
  readonly modelId: string;
  /**
  Limit of how many embeddings can be generated in a single API call.

  Use Infinity for models that do not have a limit.
  */
  readonly maxEmbeddingsPerCall: PromiseLike<number | undefined> | number | undefined;
  /**
  True if the model can handle multiple embedding calls in parallel.
  */
  readonly supportsParallelCalls: PromiseLike<boolean> | boolean;
  /**
  Generates a list of embeddings for the given input text.

  Naming: "do" prefix to prevent accidental direct usage of the method
  by the user.
  */
  doEmbed(options: {
    /**
    List of values to embed.
    */
    values: Array<VALUE>;
    /**
    Abort signal for cancelling the operation.
    */
    abortSignal?: AbortSignal;
    /**
    Additional provider-specific options. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
    */
    providerOptions?: SharedV2ProviderOptions;
    /**
    Additional HTTP headers to be sent with the request.
    Only applicable for HTTP-based providers.
    */
    headers?: Record<string, string | undefined>;
  }): PromiseLike<{
    /**
    Generated embeddings. They are in the same order as the input values.
    */
    embeddings: Array<EmbeddingModelV2Embedding>;
    /**
    Token usage. We only have input tokens for embeddings.
    */
    usage?: {
      tokens: number;
    };
    /**
    Additional provider-specific metadata. They are passed through
    from the provider to the AI SDK and enable provider-specific
    results that can be fully encapsulated in the provider.
    */
    providerMetadata?: SharedV2ProviderMetadata;
    /**
    Optional response information for debugging purposes.
    */
    response?: {
      /**
      Response headers.
      */
      headers?: SharedV2Headers;
      /**
      The response body.
      */
      body?: unknown;
    };
  }>;
};

declare const symbol$d: unique symbol;
/**
 * Custom error class for AI SDK related errors.
 * @extends Error
 */
declare class AISDKError extends Error {
  private readonly [symbol$d];
  /**
   * The underlying cause of the error, if any.
   */
  readonly cause?: unknown;
  /**
   * Creates an AI SDK Error.
   *
   * @param {Object} params - The parameters for creating the error.
   * @param {string} params.name - The name of the error.
   * @param {string} params.message - The error message.
   * @param {unknown} [params.cause] - The underlying cause of the error.
   */
  constructor({ name, message, cause, }: {
    name: string;
    message: string;
    cause?: unknown;
  });
  /**
   * Checks if the given error is an AI SDK Error.
   * @param {unknown} error - The error to check.
   * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
   */
  static isInstance(error: unknown): error is AISDKError;
  protected static hasMarker(error: unknown, marker: string): boolean;
}

declare const symbol$c: unique symbol;
declare class APICallError extends AISDKError {
  private readonly [symbol$c];
  readonly url: string;
  readonly requestBodyValues: unknown;
  readonly statusCode?: number;
  readonly responseHeaders?: Record<string, string>;
  readonly responseBody?: string;
  readonly isRetryable: boolean;
  readonly data?: unknown;
  constructor({ message, url, requestBodyValues, statusCode, responseHeaders, responseBody, cause, isRetryable, // server error
  data, }: {
    message: string;
    url: string;
    requestBodyValues: unknown;
    statusCode?: number;
    responseHeaders?: Record<string, string>;
    responseBody?: string;
    cause?: unknown;
    isRetryable?: boolean;
    data?: unknown;
  });
  static isInstance(error: unknown): error is APICallError;
}

declare const symbol$b: unique symbol;
declare class EmptyResponseBodyError extends AISDKError {
  private readonly [symbol$b];
  constructor({ message }?: {
    message?: string;
  });
  static isInstance(error: unknown): error is EmptyResponseBodyError;
}

declare function getErrorMessage(error: unknown | undefined): string;

declare const symbol$a: unique symbol;
/**
 * A function argument is invalid.
 */
declare class InvalidArgumentError extends AISDKError {
  private readonly [symbol$a];
  readonly argument: string;
  constructor({ message, cause, argument, }: {
    argument: string;
    message: string;
    cause?: unknown;
  });
  static isInstance(error: unknown): error is InvalidArgumentError;
}

declare const symbol$9: unique symbol;
/**
 * A prompt is invalid. This error should be thrown by providers when they cannot
 * process a prompt.
 */
declare class InvalidPromptError extends AISDKError {
  private readonly [symbol$9];
  readonly prompt: unknown;
  constructor({ prompt, message, cause, }: {
    prompt: unknown;
    message: string;
    cause?: unknown;
  });
  static isInstance(error: unknown): error is InvalidPromptError;
}

declare const symbol$8: unique symbol;
/**
 * Server returned a response with invalid data content.
 * This should be thrown by providers when they cannot parse the response from the API.
 */
declare class InvalidResponseDataError extends AISDKError {
  private readonly [symbol$8];
  readonly data: unknown;
  constructor({ data, message, }: {
    data: unknown;
    message?: string;
  });
  static isInstance(error: unknown): error is InvalidResponseDataError;
}

declare const symbol$7: unique symbol;
declare class JSONParseError extends AISDKError {
  private readonly [symbol$7];
  readonly text: string;
  constructor({ text, cause }: {
    text: string;
    cause: unknown;
  });
  static isInstance(error: unknown): error is JSONParseError;
}

declare const symbol$6: unique symbol;
declare class LoadAPIKeyError extends AISDKError {
  private readonly [symbol$6];
  constructor({ message }: {
    message: string;
  });
  static isInstance(error: unknown): error is LoadAPIKeyError;
}

declare const symbol$5: unique symbol;
declare class LoadSettingError extends AISDKError {
  private readonly [symbol$5];
  constructor({ message }: {
    message: string;
  });
  static isInstance(error: unknown): error is LoadSettingError;
}

declare const symbol$4: unique symbol;
/**
Thrown when the AI provider fails to generate any content.
*/
declare class NoContentGeneratedError extends AISDKError {
  private readonly [symbol$4];
  constructor({ message, }?: {
    message?: string;
  });
  static isInstance(error: unknown): error is NoContentGeneratedError;
}

declare const symbol$3: unique symbol;
declare class NoSuchModelError extends AISDKError {
  private readonly [symbol$3];
  readonly modelId: string;
  readonly modelType: 'languageModel' | 'embeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel' | 'rerankingModel';
  constructor({ errorName, modelId, modelType, message, }: {
    errorName?: string;
    modelId: string;
    modelType: 'languageModel' | 'embeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel' | 'rerankingModel';
    message?: string;
  });
  static isInstance(error: unknown): error is NoSuchModelError;
}

declare const symbol$2: unique symbol;
declare class TooManyEmbeddingValuesForCallError extends AISDKError {
  private readonly [symbol$2];
  readonly provider: string;
  readonly modelId: string;
  readonly maxEmbeddingsPerCall: number;
  readonly values: Array<unknown>;
  constructor(options: {
    provider: string;
    modelId: string;
    maxEmbeddingsPerCall: number;
    values: Array<unknown>;
  });
  static isInstance(error: unknown): error is TooManyEmbeddingValuesForCallError;
}

declare const symbol$1: unique symbol;
declare class TypeValidationError extends AISDKError {
  private readonly [symbol$1];
  readonly value: unknown;
  constructor({ value, cause }: {
    value: unknown;
    cause: unknown;
  });
  static isInstance(error: unknown): error is TypeValidationError;
  /**
   * Wraps an error into a TypeValidationError.
   * If the cause is already a TypeValidationError with the same value, it returns the cause.
   * Otherwise, it creates a new TypeValidationError.
   *
   * @param {Object} params - The parameters for wrapping the error.
   * @param {unknown} params.value - The value that failed validation.
   * @param {unknown} params.cause - The original error or cause of the validation failure.
   * @returns {TypeValidationError} A TypeValidationError instance.
   */
  static wrap({ value, cause, }: {
    value: unknown;
    cause: unknown;
  }): TypeValidationError;
}

declare const symbol: unique symbol;
declare class UnsupportedFunctionalityError extends AISDKError {
  private readonly [symbol];
  readonly functionality: string;
  constructor({ functionality, message, }: {
    functionality: string;
    message?: string;
  });
  static isInstance(error: unknown): error is UnsupportedFunctionalityError;
}

declare function isJSONValue(value: unknown): value is JSONValue;
declare function isJSONArray(value: unknown): value is JSONArray;
declare function isJSONObject(value: unknown): value is JSONObject;
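// Illustrative sketch (not from the published file): narrowing errors thrown by a
// provider using the error classes declared above. `callProvider` is a hypothetical
// callback; the error classes and getErrorMessage are assumed package exports.
import { APICallError, TypeValidationError, getErrorMessage } from '@ai-sdk/provider';

async function runWithErrorHandling(callProvider: () => Promise<unknown>): Promise<unknown> {
  try {
    return await callProvider();
  } catch (error: unknown) {
    if (APICallError.isInstance(error)) {
      // Retryable failures (e.g. 5xx responses) can be retried by the caller.
      console.error('API call failed:', error.url, error.statusCode, error.isRetryable);
    } else if (TypeValidationError.isInstance(error)) {
      console.error('Response failed validation for value:', error.value);
    }
    throw new Error(getErrorMessage(error));
  }
}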

/**
Usage information for an image model call.
*/
type ImageModelV3Usage = {
  /**
  The number of input (prompt) tokens used.
  */
  inputTokens: number | undefined;
  /**
  The number of output tokens used, if reported by the provider.
  */
  outputTokens: number | undefined;
  /**
  The total number of tokens as reported by the provider.
  */
  totalTokens: number | undefined;
};

/**
 * An image file that can be used for image editing or variation generation.
 */
type ImageModelV3File = {
  type: 'file';
  /**
   * The IANA media type of the file, e.g. `image/png`. Any string is supported.
   *
   * @see https://www.iana.org/assignments/media-types/media-types.xhtml
   */
  mediaType: string;
  /**
   * Generated file data as base64 encoded strings or binary data.
   *
   * The file data should be returned without any unnecessary conversion.
   * If the API returns base64 encoded strings, the file data should be returned
   * as base64 encoded strings. If the API returns binary data, the file data should
   * be returned as binary data.
   */
  data: string | Uint8Array;
  /**
   * Optional provider-specific metadata for the file part.
   */
  providerOptions?: SharedV3ProviderMetadata;
} | {
  type: 'url';
  /**
   * The URL of the image file.
   */
  url: string;
  /**
   * Optional provider-specific metadata for the file part.
   */
  providerOptions?: SharedV3ProviderMetadata;
};

type ImageModelV3CallOptions = {
  /**
   * Prompt for the image generation. Some operations, like upscaling, may not require a prompt.
   */
  prompt: string | undefined;
  /**
   * Number of images to generate.
   */
  n: number;
  /**
   * Size of the images to generate.
   * Must have the format `{width}x{height}`.
   * `undefined` will use the provider's default size.
   */
  size: `${number}x${number}` | undefined;
  /**
   * Aspect ratio of the images to generate.
   * Must have the format `{width}:{height}`.
   * `undefined` will use the provider's default aspect ratio.
   */
  aspectRatio: `${number}:${number}` | undefined;
  /**
   * Seed for the image generation.
   * `undefined` will use the provider's default seed.
   */
  seed: number | undefined;
  /**
   * Array of images for image editing or variation generation.
   * The images should be provided as base64 encoded strings or binary data.
   */
  files: ImageModelV3File[] | undefined;
  /**
   * Mask image for inpainting operations.
   * The mask should be provided as base64 encoded strings or binary data.
   */
  mask: ImageModelV3File | undefined;
  /**
   * Additional provider-specific options that are passed through to the provider
   * as body parameters.
   *
   * The outer record is keyed by the provider name, and the inner
   * record is keyed by the provider-specific metadata key.
   *
   * ```ts
   * {
   *   "openai": {
   *     "style": "vivid"
   *   }
   * }
   * ```
   */
  providerOptions: SharedV3ProviderOptions;
  /**
   * Abort signal for cancelling the operation.
   */
  abortSignal?: AbortSignal;
  /**
   * Additional HTTP headers to be sent with the request.
   * Only applicable for HTTP-based providers.
   */
  headers?: Record<string, string | undefined>;
};

type ImageModelV3ProviderMetadata = Record<string, {
  images: JSONArray;
} & JSONValue>;
type GetMaxImagesPerCallFunction$1 = (options: {
  modelId: string;
}) => PromiseLike<number | undefined> | number | undefined;
/**
Image generation model specification version 3.
*/
type ImageModelV3 = {
  /**
  The image model must specify which image model interface
  version it implements. This will allow us to evolve the image
  model interface and retain backwards compatibility. The different
  implementation versions can be handled as a discriminated union
  on our side.
  */
  readonly specificationVersion: 'v3';
  /**
  Name of the provider for logging purposes.
  */
  readonly provider: string;
  /**
  Provider-specific model ID for logging purposes.
  */
  readonly modelId: string;
  /**
  Limit of how many images can be generated in a single API call.
  Can be set to a number for a fixed limit, to undefined to use
  the global limit, or a function that returns a number or undefined,
  optionally as a promise.
  */
  readonly maxImagesPerCall: number | undefined | GetMaxImagesPerCallFunction$1;
  /**
  Generates an array of images.
  */
  doGenerate(options: ImageModelV3CallOptions): PromiseLike<{
    /**
    Generated images as base64 encoded strings or binary data.
    The images should be returned without any unnecessary conversion.
    If the API returns base64 encoded strings, the images should be returned
    as base64 encoded strings. If the API returns binary data, the images should
    be returned as binary data.
    */
    images: Array<string> | Array<Uint8Array>;
    /**
    Warnings for the call, e.g. unsupported features.
    */
    warnings: Array<SharedV3Warning>;
    /**
    Additional provider-specific metadata. They are passed through
    from the provider to the AI SDK and enable provider-specific
    results that can be fully encapsulated in the provider.

    The outer record is keyed by the provider name, and the inner
    record is provider-specific metadata. It always includes an
    `images` key with image-specific metadata

    ```ts
    {
      "openai": {
        "images": ["revisedPrompt": "Revised prompt here."]
      }
    }
    ```
    */
    providerMetadata?: ImageModelV3ProviderMetadata;
    /**
    Response information for telemetry and debugging purposes.
    */
    response: {
      /**
      Timestamp for the start of the generated response.
      */
      timestamp: Date;
      /**
      The ID of the response model that was used to generate the response.
      */
      modelId: string;
      /**
      Response headers.
      */
      headers: Record<string, string> | undefined;
    };
    /**
    Optional token usage for the image generation call (if the provider reports it).
    */
    usage?: ImageModelV3Usage;
  }>;
};
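// Illustrative sketch (not from the published file): calling doGenerate on an
// ImageModelV3 instance. Note that fields typed `... | undefined` (size, aspectRatio,
// seed, files, mask) are required keys that may be set to undefined. The prompt and
// size values are placeholders.
import type { ImageModelV3 } from '@ai-sdk/provider';

async function generateSingleImage(model: ImageModelV3) {
  const result = await model.doGenerate({
    prompt: 'A watercolor painting of a lighthouse',
    n: 1,
    size: '1024x1024',
    aspectRatio: undefined,
    seed: undefined,
    files: undefined,
    mask: undefined,
    providerOptions: {},
  });
  return {
    image: result.images[0],
    warnings: result.warnings,
    usage: result.usage,
  };
}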

type ImageModelV2CallOptions = {
  /**
  Prompt for the image generation.
  */
  prompt: string;
  /**
  Number of images to generate.
  */
  n: number;
  /**
  Size of the images to generate.
  Must have the format `{width}x{height}`.
  `undefined` will use the provider's default size.
  */
  size: `${number}x${number}` | undefined;
  /**
  Aspect ratio of the images to generate.
  Must have the format `{width}:{height}`.
  `undefined` will use the provider's default aspect ratio.
  */
  aspectRatio: `${number}:${number}` | undefined;
  /**
  Seed for the image generation.
  `undefined` will use the provider's default seed.
  */
  seed: number | undefined;
  /**
  Additional provider-specific options that are passed through to the provider
  as body parameters.

  The outer record is keyed by the provider name, and the inner
  record is keyed by the provider-specific metadata key.
  ```ts
  {
    "openai": {
      "style": "vivid"
    }
  }
  ```
  */
  providerOptions: SharedV2ProviderOptions;
  /**
  Abort signal for cancelling the operation.
  */
  abortSignal?: AbortSignal;
  /**
  Additional HTTP headers to be sent with the request.
  Only applicable for HTTP-based providers.
  */
  headers?: Record<string, string | undefined>;
};

/**
Warning from the model provider for this call. The call will proceed, but e.g.
some settings might not be supported, which can lead to suboptimal results.
*/
type ImageModelV2CallWarning = {
  type: 'unsupported-setting';
  setting: keyof ImageModelV2CallOptions;
  details?: string;
} | {
  type: 'other';
  message: string;
};

type ImageModelV2ProviderMetadata = Record<string, {
  images: JSONArray;
} & JSONValue>;
type GetMaxImagesPerCallFunction = (options: {
  modelId: string;
}) => PromiseLike<number | undefined> | number | undefined;
/**
Image generation model specification version 2.
*/
type ImageModelV2 = {
  /**
  The image model must specify which image model interface
  version it implements. This will allow us to evolve the image
  model interface and retain backwards compatibility. The different
  implementation versions can be handled as a discriminated union
  on our side.
  */
  readonly specificationVersion: 'v2';
  /**
  Name of the provider for logging purposes.
  */
  readonly provider: string;
  /**
  Provider-specific model ID for logging purposes.
  */
  readonly modelId: string;
  /**
  Limit of how many images can be generated in a single API call.
  Can be set to a number for a fixed limit, to undefined to use
  the global limit, or a function that returns a number or undefined,
  optionally as a promise.
  */
  readonly maxImagesPerCall: number | undefined | GetMaxImagesPerCallFunction;
  /**
  Generates an array of images.
  */
  doGenerate(options: ImageModelV2CallOptions): PromiseLike<{
    /**
    Generated images as base64 encoded strings or binary data.
    The images should be returned without any unnecessary conversion.
    If the API returns base64 encoded strings, the images should be returned
    as base64 encoded strings. If the API returns binary data, the images should
    be returned as binary data.
    */
    images: Array<string> | Array<Uint8Array>;
    /**
    Warnings for the call, e.g. unsupported settings.
    */
    warnings: Array<ImageModelV2CallWarning>;
    /**
    Additional provider-specific metadata. They are passed through
    from the provider to the AI SDK and enable provider-specific
    results that can be fully encapsulated in the provider.

    The outer record is keyed by the provider name, and the inner
    record is provider-specific metadata. It always includes an
    `images` key with image-specific metadata

    ```ts
    {
      "openai": {
        "images": ["revisedPrompt": "Revised prompt here."]
      }
    }
    ```
    */
    providerMetadata?: ImageModelV2ProviderMetadata;
    /**
    Response information for telemetry and debugging purposes.
    */
    response: {
      /**
      Timestamp for the start of the generated response.
      */
      timestamp: Date;
      /**
      The ID of the response model that was used to generate the response.
      */
      modelId: string;
      /**
      Response headers.
      */
      headers: Record<string, string> | undefined;
    };
  }>;
};

/**
 * Middleware for ImageModelV3.
 * This type defines the structure for middleware that can be used to modify
 * the behavior of ImageModelV3 operations.
 */
type ImageModelV3Middleware = {
  /**
   * Middleware specification version. Use `v3` for the current version.
   */
  readonly specificationVersion: 'v3';
  /**
   * Override the provider name if desired.
   * @param options.model - The image model instance.
   */
  overrideProvider?: (options: {
    model: ImageModelV3;
  }) => string;
  /**
   * Override the model ID if desired.
   * @param options.model - The image model instance.
   */
  overrideModelId?: (options: {
    model: ImageModelV3;
  }) => string;
  /**
   * Override the limit of how many images can be generated in a single API call if desired.
   * @param options.model - The image model instance.
   */
  overrideMaxImagesPerCall?: (options: {
    model: ImageModelV3;
  }) => ImageModelV3['maxImagesPerCall'];
  /**
   * Transforms the parameters before they are passed to the image model.
   * @param options - Object containing the parameters.
   * @param options.params - The original parameters for the image model call.
   * @returns A promise that resolves to the transformed parameters.
   */
  transformParams?: (options: {
    params: ImageModelV3CallOptions;
    model: ImageModelV3;
  }) => PromiseLike<ImageModelV3CallOptions>;
  /**
   * Wraps the generate operation of the image model.
   *
   * @param options - Object containing the generate function, parameters, and model.
   * @param options.doGenerate - The original generate function.
   * @param options.params - The parameters for the generate call. If the
   * `transformParams` middleware is used, this will be the transformed parameters.
   * @param options.model - The image model instance.
   * @returns A promise that resolves to the result of the generate operation.
   */
  wrapGenerate?: (options: {
    doGenerate: () => ReturnType<ImageModelV3['doGenerate']>;
    params: ImageModelV3CallOptions;
    model: ImageModelV3;
  }) => Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
};
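// Illustrative sketch (not from the published file): an ImageModelV3Middleware that
// forces single-image generation in transformParams and logs the duration of
// doGenerate in wrapGenerate. The logging format is arbitrary.
import type { ImageModelV3Middleware } from '@ai-sdk/provider';

const singleImageLoggingMiddleware: ImageModelV3Middleware = {
  specificationVersion: 'v3',
  transformParams: async ({ params }) => ({ ...params, n: 1 }),
  wrapGenerate: async ({ doGenerate, model }) => {
    const start = Date.now();
    const result = await doGenerate();
    console.log(`${model.provider}:${model.modelId} generated images in ${Date.now() - start}ms`);
    return result;
  },
};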

/**
A tool has a name, a description, and a set of parameters.

Note: this is **not** the user-facing tool definition. The AI SDK methods will
map the user-facing tool definitions to this format.
*/
type LanguageModelV3FunctionTool = {
  /**
  The type of the tool (always 'function').
  */
  type: 'function';
  /**
  The name of the tool. Unique within this model call.
  */
  name: string;
  /**
  A description of the tool. The language model uses this to understand the
  tool's purpose and to provide better completion suggestions.
  */
  description?: string;
  /**
  The parameters that the tool expects. The language model uses this to
  understand the tool's input requirements and to provide matching suggestions.
  */
  inputSchema: JSONSchema7;
  /**
   * An optional list of input examples that show the language
   * model what the input should look like.
   */
  inputExamples?: Array<{
    input: JSONObject;
  }>;
  /**
   * Strict mode setting for the tool.
   *
   * Providers that support strict mode will use this setting to determine
   * how the input should be generated. Strict mode will always produce
   * valid inputs, but it might limit what input schemas are supported.
   */
  strict?: boolean;
  /**
  The provider-specific options for the tool.
  */
  providerOptions?: SharedV3ProviderOptions;
};
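// Illustrative sketch (not from the published file): a LanguageModelV3FunctionTool
// describing a hypothetical weather lookup tool. The inputSchema is a plain
// JSON Schema (JSONSchema7) object.
import type { LanguageModelV3FunctionTool } from '@ai-sdk/provider';

const weatherTool: LanguageModelV3FunctionTool = {
  type: 'function',
  name: 'get_weather',
  description: 'Returns the current weather for a given city.',
  inputSchema: {
    type: 'object',
    properties: {
      city: { type: 'string', description: 'City name, e.g. "Berlin"' },
    },
    required: ['city'],
    additionalProperties: false,
  },
  inputExamples: [{ input: { city: 'Berlin' } }],
  strict: true,
};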

/**
Data content. Can be a Uint8Array, base64 encoded data as a string or a URL.
*/
type LanguageModelV3DataContent = Uint8Array | string | URL;

/**
A prompt is a list of messages.

Note: Not all models and prompt formats support multi-modal inputs and
tool calls. The validation happens at runtime.

Note: This is not a user-facing prompt. The AI SDK methods will map the
user-facing prompt types such as chat or instruction prompts to this format.
*/
type LanguageModelV3Prompt = Array<LanguageModelV3Message>;
type LanguageModelV3Message = ({
  role: 'system';
  content: string;
} | {
  role: 'user';
  content: Array<LanguageModelV3TextPart | LanguageModelV3FilePart>;
} | {
  role: 'assistant';
  content: Array<LanguageModelV3TextPart | LanguageModelV3FilePart | LanguageModelV3ReasoningPart | LanguageModelV3ToolCallPart | LanguageModelV3ToolResultPart>;
} | {
  role: 'tool';
  content: Array<LanguageModelV3ToolResultPart | LanguageModelV3ToolApprovalResponsePart>;
}) & {
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
};
/**
Text content part of a prompt. It contains a string of text.
*/
interface LanguageModelV3TextPart {
  type: 'text';
  /**
  The text content.
  */
  text: string;
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
}
/**
Reasoning content part of a prompt. It contains a string of reasoning text.
*/
interface LanguageModelV3ReasoningPart {
  type: 'reasoning';
  /**
  The reasoning text.
  */
  text: string;
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
}
/**
File content part of a prompt. It contains a file.
*/
interface LanguageModelV3FilePart {
  type: 'file';
  /**
   * Optional filename of the file.
   */
  filename?: string;
  /**
  File data. Can be a Uint8Array, base64 encoded data as a string or a URL.
  */
  data: LanguageModelV3DataContent;
  /**
  IANA media type of the file.

  Can support wildcards, e.g. `image/*` (in which case the provider needs to take appropriate action).

  @see https://www.iana.org/assignments/media-types/media-types.xhtml
  */
  mediaType: string;
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
}
/**
Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
*/
interface LanguageModelV3ToolCallPart {
  type: 'tool-call';
  /**
  ID of the tool call. This ID is used to match the tool call with the tool result.
  */
  toolCallId: string;
  /**
  Name of the tool that is being called.
  */
  toolName: string;
  /**
  Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
  */
  input: unknown;
  /**
   * Whether the tool call will be executed by the provider.
   * If this flag is not set or is false, the tool call will be executed by the client.
   */
  providerExecuted?: boolean;
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
}
/**
Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
*/
interface LanguageModelV3ToolResultPart {
  type: 'tool-result';
  /**
  ID of the tool call that this result is associated with.
  */
  toolCallId: string;
  /**
  Name of the tool that generated this result.
  */
  toolName: string;
  /**
  Result of the tool call.
  */
  output: LanguageModelV3ToolResultOutput;
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
}
/**
 * Tool approval response content part of a prompt. It contains the user's
 * decision to approve or deny a provider-executed tool call.
 */
interface LanguageModelV3ToolApprovalResponsePart {
  type: 'tool-approval-response';
  /**
   * ID of the approval request that this response refers to.
   */
  approvalId: string;
  /**
   * Whether the approval was granted (true) or denied (false).
   */
  approved: boolean;
  /**
   * Optional reason for approval or denial.
   */
  reason?: string;
  /**
   * Additional provider-specific options. They are passed through
   * to the provider from the AI SDK and enable provider-specific
   * functionality that can be fully encapsulated in the provider.
   */
  providerOptions?: SharedV3ProviderOptions;
}
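// Illustrative sketch (not from the published file): a LanguageModelV3Prompt with a
// system message, a user text message, an assistant tool call, and the matching
// tool result. Tool name, IDs, and values are hypothetical.
import type { LanguageModelV3Prompt } from '@ai-sdk/provider';

const examplePrompt: LanguageModelV3Prompt = [
  { role: 'system', content: 'You are a helpful assistant.' },
  {
    role: 'user',
    content: [{ type: 'text', text: 'What is the weather in Berlin?' }],
  },
  {
    role: 'assistant',
    content: [
      {
        type: 'tool-call',
        toolCallId: 'call-1',
        toolName: 'get_weather',
        input: { city: 'Berlin' },
      },
    ],
  },
  {
    role: 'tool',
    content: [
      {
        type: 'tool-result',
        toolCallId: 'call-1',
        toolName: 'get_weather',
        output: { type: 'json', value: { temperature: 18, unit: 'celsius' } },
      },
    ],
  },
];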
/**
* Result of a tool call.
*/
type LanguageModelV3ToolResultOutput = {
/**
* Text tool output that should be directly sent to the API.
*/
type: 'text';
value: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'json';
value: JSONValue;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
/**
* Type when the user has denied the execution of the tool call.
*/
type: 'execution-denied';
/**
* Optional reason for the execution denial.
*/
reason?: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'error-text';
value: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'error-json';
value: JSONValue;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'content';
value: Array<{
type: 'text';
/**
Text content.
*/
text: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'file-data';
/**
Base-64 encoded media data.
*/
data: string;
/**
IANA media type.
@see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
mediaType: string;
/**
* Optional filename of the file.
*/
filename?: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'file-url';
/**
* URL of the file.
*/
url: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
type: 'file-id';
/**
* ID of the file.
*
* If you use multiple providers, you need to
* specify the provider specific ids using
* the Record option. The key is the provider
* name, e.g. 'openai' or 'anthropic'.
*/
fileId: string | Record<string, string>;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
/**
* Images that are referenced using base64 encoded data.
*/
type: 'image-data';
/**
Base-64 encoded image data.
*/
data: string;
/**
IANA media type.
@see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
mediaType: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
/**
* Images that are referenced using a URL.
*/
type: 'image-url';
/**
* URL of the image.
*/
url: string;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
/**
* Images that are referenced using a provider file id.
*/
type: 'image-file-id';
/**
* Image that is referenced using a provider file id.
*
* If you use multiple providers, you need to
* specify the provider specific ids using
* the Record option. The key is the provider
* name, e.g. 'openai' or 'anthropic'.
*/
fileId: string | Record<string, string>;
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
} | {
/**
* Custom content part. This can be used to implement
* provider-specific content parts.
*/
type: 'custom';
/**
* Provider-specific options.
*/
providerOptions?: SharedV3ProviderOptions;
}>;
};
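
// Illustrative example (not part of the declarations): two values that satisfy the
// LanguageModelV3ToolResultOutput union, assuming a consumer written against these types.
// The URL below is a made-up placeholder.
const textToolOutput: LanguageModelV3ToolResultOutput = {
  type: 'text',
  value: 'The weather in Berlin is 18 degrees Celsius and cloudy.',
};
const richToolOutput: LanguageModelV3ToolResultOutput = {
  type: 'content',
  value: [
    { type: 'text', text: 'Rendered chart:' },
    { type: 'image-url', url: 'https://example.com/chart.png' },
  ],
};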

/**
* The configuration of a provider tool.
*
* Provider tools are tools that are specific to a certain provider.
* The input and output schemas are defined by the provider, and
* some of the tools are also executed on the provider systems.
*/
type LanguageModelV3ProviderTool = {
/**
* The type of the tool (always 'provider').
*/
type: 'provider';
/**
* The ID of the tool. Should follow the format `<provider-id>.<unique-tool-name>`.
*/
id: `${string}.${string}`;
/**
* The name of the tool. Unique within this model call.
*/
name: string;
/**
* The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
*/
args: Record<string, unknown>;
};

type LanguageModelV3ToolChoice = {
type: 'auto';
} | {
type: 'none';
} | {
type: 'required';
} | {
type: 'tool';
toolName: string;
};

type LanguageModelV3CallOptions = {
/**
A language model prompt is a standardized prompt type.

Note: This is **not** the user-facing prompt. The AI SDK methods will map the
user-facing prompt types such as chat or instruction prompts to this format.
That approach allows us to evolve the user-facing prompts without breaking
the language model interface.
*/
prompt: LanguageModelV3Prompt;
/**
Maximum number of tokens to generate.
*/
maxOutputTokens?: number;
/**
Temperature setting. The range depends on the provider and model.
*/
temperature?: number;
/**
Stop sequences.
If set, the model will stop generating text when one of the stop sequences is generated.
Providers may have limits on the number of stop sequences.
*/
stopSequences?: string[];
/**
Nucleus sampling.
*/
topP?: number;
/**
Only sample from the top K options for each subsequent token.

Used to remove "long tail" low probability responses.
Recommended for advanced use cases only. You usually only need to use temperature.
*/
topK?: number;
/**
Presence penalty setting. It affects the likelihood of the model to
repeat information that is already in the prompt.
*/
presencePenalty?: number;
/**
Frequency penalty setting. It affects the likelihood of the model
to repeatedly use the same words or phrases.
*/
frequencyPenalty?: number;
/**
Response format. The output can either be text or JSON. Default is text.

If JSON is selected, a schema can optionally be provided to guide the LLM.
*/
responseFormat?: {
type: 'text';
} | {
type: 'json';
/**
* JSON schema that the generated output should conform to.
*/
schema?: JSONSchema7;
/**
* Name of output that should be generated. Used by some providers for additional LLM guidance.
*/
name?: string;
/**
* Description of the output that should be generated. Used by some providers for additional LLM guidance.
*/
description?: string;
};
/**
The seed (integer) to use for random sampling. If set and supported
by the model, calls will generate deterministic results.
*/
seed?: number;
/**
The tools that are available for the model.
*/
tools?: Array<LanguageModelV3FunctionTool | LanguageModelV3ProviderTool>;
/**
Specifies how the tool should be selected. Defaults to 'auto'.
*/
toolChoice?: LanguageModelV3ToolChoice;
/**
Include raw chunks in the stream. Only applicable for streaming calls.
*/
includeRawChunks?: boolean;
/**
Abort signal for cancelling the operation.
*/
abortSignal?: AbortSignal;
/**
Additional HTTP headers to be sent with the request.
Only applicable for HTTP-based providers.
*/
headers?: Record<string, string | undefined>;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV3ProviderOptions;
};
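
// Illustrative example (not part of the declarations): typical sampling and response
// format settings for a call. The prompt field is omitted here because the
// LanguageModelV3Prompt message types are declared elsewhere in this file; a real call
// must include it. The schema content is a made-up placeholder.
const exampleCallSettings: Omit<LanguageModelV3CallOptions, 'prompt'> = {
  maxOutputTokens: 1024,
  temperature: 0.2,
  responseFormat: {
    type: 'json',
    schema: { type: 'object', properties: { city: { type: 'string' } } },
    name: 'weather',
  },
  toolChoice: { type: 'auto' },
};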

/**
A file that has been generated by the model.
Generated files as base64 encoded strings or binary data.
The files should be returned without any unnecessary conversion.
*/
type LanguageModelV3File = {
type: 'file';
/**
The IANA media type of the file, e.g. `image/png` or `audio/mp3`.

@see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
mediaType: string;
/**
Generated file data as base64 encoded strings or binary data.

The file data should be returned without any unnecessary conversion.
If the API returns base64 encoded strings, the file data should be returned
as base64 encoded strings. If the API returns binary data, the file data should
be returned as binary data.
*/
data: string | Uint8Array;
/**
* Optional provider-specific metadata for the file part.
*/
providerMetadata?: SharedV3ProviderMetadata;
};

/**
Reasoning that the model has generated.
*/
type LanguageModelV3Reasoning = {
type: 'reasoning';
text: string;
/**
* Optional provider-specific metadata for the reasoning part.
*/
providerMetadata?: SharedV3ProviderMetadata;
};

/**
A source that has been used as input to generate the response.
*/
type LanguageModelV3Source = {
type: 'source';
/**
* The type of source - URL sources reference web content.
*/
sourceType: 'url';
/**
* The ID of the source.
*/
id: string;
/**
* The URL of the source.
*/
url: string;
/**
* The title of the source.
*/
title?: string;
/**
* Additional provider metadata for the source.
*/
providerMetadata?: SharedV3ProviderMetadata;
} | {
type: 'source';
/**
* The type of source - document sources reference files/documents.
*/
sourceType: 'document';
/**
* The ID of the source.
*/
id: string;
/**
* IANA media type of the document (e.g., 'application/pdf').
*/
mediaType: string;
/**
* The title of the document.
*/
title: string;
/**
* Optional filename of the document.
*/
filename?: string;
/**
* Additional provider metadata for the source.
*/
providerMetadata?: SharedV3ProviderMetadata;
};

/**
Text that the model has generated.
*/
type LanguageModelV3Text = {
type: 'text';
/**
The text content.
*/
text: string;
providerMetadata?: SharedV3ProviderMetadata;
};

/**
* Tool approval request emitted by a provider for a provider-executed tool call.
*
* This is used for flows where the provider executes the tool (e.g. MCP tools)
* but requires an explicit user approval before continuing.
*/
type LanguageModelV3ToolApprovalRequest = {
type: 'tool-approval-request';
/**
* ID of the approval request. This ID is referenced by the subsequent
* tool-approval-response (tool message) to approve or deny execution.
*/
approvalId: string;
/**
* The tool call ID that this approval request is for.
*/
toolCallId: string;
/**
* Additional provider-specific metadata for the approval request.
*/
providerMetadata?: SharedV3ProviderMetadata;
};

/**
* Tool calls that the model has generated.
*/
type LanguageModelV3ToolCall = {
type: 'tool-call';
/**
* The identifier of the tool call. It must be unique across all tool calls.
*/
toolCallId: string;
/**
* The name of the tool that should be called.
*/
toolName: string;
/**
* Stringified JSON object with the tool call arguments. Must match the
* parameters schema of the tool.
*/
input: string;
/**
* Whether the tool call will be executed by the provider.
* If this flag is not set or is false, the tool call will be executed by the client.
*/
providerExecuted?: boolean;
/**
* Whether the tool is dynamic, i.e. defined at runtime.
* For example, MCP (Model Context Protocol) tools that are executed by the provider.
*/
dynamic?: boolean;
/**
* Additional provider-specific metadata for the tool call.
*/
providerMetadata?: SharedV3ProviderMetadata;
};

/**
Result of a tool call that has been executed by the provider.
*/
type LanguageModelV3ToolResult = {
type: 'tool-result';
/**
* The ID of the tool call that this result is associated with.
*/
toolCallId: string;
/**
* Name of the tool that generated this result.
*/
toolName: string;
/**
* Result of the tool call. This is a JSON-serializable object.
*/
result: NonNullable<JSONValue>;
/**
* Optional flag if the result is an error or an error message.
*/
isError?: boolean;
/**
* Whether the tool result is preliminary.
*
* Preliminary tool results replace each other, e.g. image previews.
* There always has to be a final, non-preliminary tool result.
*
* If this flag is set to true, the tool result is preliminary.
* If this flag is not set or is false, the tool result is not preliminary.
*/
preliminary?: boolean;
/**
* Whether the tool is dynamic, i.e. defined at runtime.
* For example, MCP (Model Context Protocol) tools that are executed by the provider.
*/
dynamic?: boolean;
/**
* Additional provider-specific metadata for the tool result.
*/
providerMetadata?: SharedV3ProviderMetadata;
};

type LanguageModelV3Content = LanguageModelV3Text | LanguageModelV3Reasoning | LanguageModelV3File | LanguageModelV3ToolApprovalRequest | LanguageModelV3Source | LanguageModelV3ToolCall | LanguageModelV3ToolResult;

/**
* Reason why a language model finished generating a response.
*
* Contains both a unified finish reason and a raw finish reason from the provider.
* The unified finish reason is used to provide a consistent finish reason across different providers.
* The raw finish reason is used to provide the original finish reason from the provider.
*/
type LanguageModelV3FinishReason = {
/**
* Unified finish reason. This enables using the same finish reason across different providers.
*
* Can be one of the following:
* - `stop`: model generated stop sequence
* - `length`: model generated maximum number of tokens
* - `content-filter`: content filter violation stopped the model
* - `tool-calls`: model triggered tool calls
* - `error`: model stopped because of an error
* - `other`: model stopped for other reasons
*/
unified: 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
/**
* Raw finish reason from the provider.
* This is the original finish reason from the provider.
*/
raw: string | undefined;
};
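
// Illustrative example (not part of the declarations): how a provider implementation
// might map its raw finish reason onto the unified value while preserving the original
// string. The raw values in the switch cases are hypothetical.
function toFinishReason(raw: string | undefined): LanguageModelV3FinishReason {
  switch (raw) {
    case 'end_turn':
      return { unified: 'stop', raw };
    case 'max_tokens':
      return { unified: 'length', raw };
    case 'tool_use':
      return { unified: 'tool-calls', raw };
    default:
      return { unified: 'other', raw };
  }
}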

interface LanguageModelV3ResponseMetadata {
/**
ID for the generated response, if the provider sends one.
*/
id?: string;
/**
Timestamp for the start of the generated response, if the provider sends one.
*/
timestamp?: Date;
/**
The ID of the response model that was used to generate the response, if the provider sends one.
*/
modelId?: string;
}

/**
* Usage information for a language model call.
*/
type LanguageModelV3Usage = {
/**
* Information about the input tokens.
*/
inputTokens: {
/**
* The total number of input (prompt) tokens used.
*/
total: number | undefined;
/**
* The number of non-cached input (prompt) tokens used.
*/
noCache: number | undefined;
/**
* The number of cached input (prompt) tokens read.
*/
cacheRead: number | undefined;
/**
* The number of cached input (prompt) tokens written.
*/
cacheWrite: number | undefined;
};
/**
* Information about the output tokens.
*/
outputTokens: {
/**
* The total number of output (completion) tokens used.
*/
total: number | undefined;
/**
* The number of text tokens used.
*/
text: number | undefined;
/**
* The number of reasoning tokens used.
*/
reasoning: number | undefined;
};
/**
* Raw usage information from the provider.
*
* This is the usage information in the shape that the provider returns.
* It can include additional information that is not part of the standard usage information.
*/
raw?: JSONObject;
};
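
// Illustrative example (not part of the declarations): converting a hypothetical
// provider usage payload into the structured LanguageModelV3Usage shape. The field
// names on the input object are assumptions, not a real provider API.
function toUsage(apiUsage: { prompt_tokens: number; completion_tokens: number; cached_tokens: number }): LanguageModelV3Usage {
  return {
    inputTokens: {
      total: apiUsage.prompt_tokens,
      noCache: apiUsage.prompt_tokens - apiUsage.cached_tokens,
      cacheRead: apiUsage.cached_tokens,
      cacheWrite: undefined,
    },
    outputTokens: {
      total: apiUsage.completion_tokens,
      text: apiUsage.completion_tokens,
      reasoning: undefined,
    },
    // Pass the provider payload through unchanged as the raw usage record.
    raw: { ...apiUsage },
  };
}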

/**
* The result of a language model doGenerate call.
*/
type LanguageModelV3GenerateResult = {
/**
* Ordered content that the model has generated.
*/
content: Array<LanguageModelV3Content>;
/**
* The finish reason.
*/
finishReason: LanguageModelV3FinishReason;
/**
* The usage information.
*/
usage: LanguageModelV3Usage;
/**
* Additional provider-specific metadata. They are passed through
* from the provider to the AI SDK and enable provider-specific
* results that can be fully encapsulated in the provider.
*/
providerMetadata?: SharedV3ProviderMetadata;
/**
* Optional request information for telemetry and debugging purposes.
*/
request?: {
/**
* Request HTTP body that was sent to the provider API.
*/
body?: unknown;
};
/**
* Optional response information for telemetry and debugging purposes.
*/
response?: LanguageModelV3ResponseMetadata & {
/**
* Response headers.
*/
headers?: SharedV3Headers;
/**
* Response HTTP body.
*/
body?: unknown;
};
/**
* Warnings for the call, e.g. unsupported settings.
*/
warnings: Array<SharedV3Warning>;
};

type LanguageModelV3StreamPart = {
type: 'text-start';
providerMetadata?: SharedV3ProviderMetadata;
id: string;
} | {
type: 'text-delta';
id: string;
providerMetadata?: SharedV3ProviderMetadata;
delta: string;
} | {
type: 'text-end';
providerMetadata?: SharedV3ProviderMetadata;
id: string;
} | {
type: 'reasoning-start';
providerMetadata?: SharedV3ProviderMetadata;
id: string;
} | {
type: 'reasoning-delta';
id: string;
providerMetadata?: SharedV3ProviderMetadata;
delta: string;
} | {
type: 'reasoning-end';
id: string;
providerMetadata?: SharedV3ProviderMetadata;
} | {
type: 'tool-input-start';
id: string;
toolName: string;
providerMetadata?: SharedV3ProviderMetadata;
providerExecuted?: boolean;
dynamic?: boolean;
title?: string;
} | {
type: 'tool-input-delta';
id: string;
delta: string;
providerMetadata?: SharedV3ProviderMetadata;
} | {
type: 'tool-input-end';
id: string;
providerMetadata?: SharedV3ProviderMetadata;
} | LanguageModelV3ToolApprovalRequest | LanguageModelV3ToolCall | LanguageModelV3ToolResult | LanguageModelV3File | LanguageModelV3Source | {
type: 'stream-start';
warnings: Array<SharedV3Warning>;
} | ({
type: 'response-metadata';
} & LanguageModelV3ResponseMetadata) | {
type: 'finish';
usage: LanguageModelV3Usage;
finishReason: LanguageModelV3FinishReason;
providerMetadata?: SharedV3ProviderMetadata;
} | {
type: 'raw';
rawValue: unknown;
} | {
type: 'error';
error: unknown;
};
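
// Illustrative example (not part of the declarations): a minimal stream of the kind a
// doStream implementation could return, emitting one text block and a finish part.
// The ids and usage numbers are made up for the sketch.
function exampleStreamParts(): ReadableStream<LanguageModelV3StreamPart> {
  return new ReadableStream<LanguageModelV3StreamPart>({
    start(controller) {
      controller.enqueue({ type: 'stream-start', warnings: [] });
      controller.enqueue({ type: 'text-start', id: 'txt_1' });
      controller.enqueue({ type: 'text-delta', id: 'txt_1', delta: 'Hello' });
      controller.enqueue({ type: 'text-end', id: 'txt_1' });
      controller.enqueue({
        type: 'finish',
        finishReason: { unified: 'stop', raw: undefined },
        usage: {
          inputTokens: { total: 3, noCache: 3, cacheRead: undefined, cacheWrite: undefined },
          outputTokens: { total: 1, text: 1, reasoning: undefined },
        },
      });
      controller.close();
    },
  });
}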

/**
* The result of a language model doStream call.
*/
type LanguageModelV3StreamResult = {
/**
* The stream.
*/
stream: ReadableStream<LanguageModelV3StreamPart>;
/**
* Optional request information for telemetry and debugging purposes.
*/
request?: {
/**
* Request HTTP body that was sent to the provider API.
*/
body?: unknown;
};
/**
* Optional response data.
*/
response?: {
/**
* Response headers.
*/
headers?: SharedV3Headers;
};
};

/**
* Specification for a language model that implements the language model interface version 3.
*/
type LanguageModelV3 = {
/**
* The language model must specify which language model interface version it implements.
*/
readonly specificationVersion: 'v3';
/**
* Provider ID.
*/
readonly provider: string;
/**
* Provider-specific model ID.
*/
readonly modelId: string;
/**
* Supported URL patterns by media type for the provider.
*
* The keys are media type patterns or full media types (e.g. `*\/*` for everything, `audio/*`, `video/*`, or `application/pdf`),
* and the values are arrays of regular expressions that match the URL paths.
*
* The matching should be against lower-case URLs.
*
* Matched URLs are supported natively by the model and are not downloaded.
*
* @returns A map of supported URL patterns by media type (as a promise or a plain object).
*/
supportedUrls: PromiseLike<Record<string, RegExp[]>> | Record<string, RegExp[]>;
/**
* Generates a language model output (non-streaming).
*
* Naming: "do" prefix to prevent accidental direct usage of the method
* by the user.
*/
doGenerate(options: LanguageModelV3CallOptions): PromiseLike<LanguageModelV3GenerateResult>;
/**
* Generates a language model output (streaming).
*
* Naming: "do" prefix to prevent accidental direct usage of the method
* by the user.
*
* @return A stream of higher-level language model output parts.
*/
doStream(options: LanguageModelV3CallOptions): PromiseLike<LanguageModelV3StreamResult>;
};
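
// Illustrative example (not part of the declarations): the rough shape of a custom
// model object that satisfies LanguageModelV3. Everything here is a stub; a real
// implementation would call the provider API and map its response into content parts.
// The provider and model ids are hypothetical.
const stubModel: LanguageModelV3 = {
  specificationVersion: 'v3',
  provider: 'example-provider',
  modelId: 'example-model-1',
  supportedUrls: {},
  async doGenerate(options) {
    return {
      content: [{ type: 'text', text: 'stubbed response' }],
      finishReason: { unified: 'stop', raw: undefined },
      usage: {
        inputTokens: { total: undefined, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
        outputTokens: { total: undefined, text: undefined, reasoning: undefined },
      },
      warnings: [],
    };
  },
  async doStream(options) {
    // An empty stream stub; a real implementation would enqueue LanguageModelV3StreamPart values.
    return { stream: new ReadableStream<LanguageModelV3StreamPart>() };
  },
};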

/**
* Experimental middleware for LanguageModelV3.
* This type defines the structure for middleware that can be used to modify
* the behavior of LanguageModelV3 operations.
*/
type LanguageModelV3Middleware = {
/**
* Middleware specification version. Use `v3` for the current version.
*/
readonly specificationVersion: 'v3';
/**
* Override the provider name if desired.
* @param options.model - The language model instance.
*/
overrideProvider?: (options: {
model: LanguageModelV3;
}) => string;
/**
* Override the model ID if desired.
* @param options.model - The language model instance.
*/
overrideModelId?: (options: {
model: LanguageModelV3;
}) => string;
/**
* Override the supported URLs if desired.
* @param options.model - The language model instance.
*/
overrideSupportedUrls?: (options: {
model: LanguageModelV3;
}) => PromiseLike<Record<string, RegExp[]>> | Record<string, RegExp[]>;
/**
* Transforms the parameters before they are passed to the language model.
* @param options - Object containing the type of operation and the parameters.
* @param options.type - The type of operation ('generate' or 'stream').
* @param options.params - The original parameters for the language model call.
* @returns A promise that resolves to the transformed parameters.
*/
transformParams?: (options: {
type: 'generate' | 'stream';
params: LanguageModelV3CallOptions;
model: LanguageModelV3;
}) => PromiseLike<LanguageModelV3CallOptions>;
/**
* Wraps the generate operation of the language model.
* @param options - Object containing the generate function, parameters, and model.
* @param options.doGenerate - The original generate function.
* @param options.doStream - The original stream function.
* @param options.params - The parameters for the generate call. If the
* `transformParams` middleware is used, this will be the transformed parameters.
* @param options.model - The language model instance.
* @returns A promise that resolves to the result of the generate operation.
*/
wrapGenerate?: (options: {
doGenerate: () => PromiseLike<LanguageModelV3GenerateResult>;
doStream: () => PromiseLike<LanguageModelV3StreamResult>;
params: LanguageModelV3CallOptions;
model: LanguageModelV3;
}) => PromiseLike<LanguageModelV3GenerateResult>;
/**
* Wraps the stream operation of the language model.
*
* @param options - Object containing the stream function, parameters, and model.
* @param options.doGenerate - The original generate function.
* @param options.doStream - The original stream function.
* @param options.params - The parameters for the stream call. If the
* `transformParams` middleware is used, this will be the transformed parameters.
* @param options.model - The language model instance.
* @returns A promise that resolves to the result of the stream operation.
*/
wrapStream?: (options: {
doGenerate: () => PromiseLike<LanguageModelV3GenerateResult>;
doStream: () => PromiseLike<LanguageModelV3StreamResult>;
params: LanguageModelV3CallOptions;
model: LanguageModelV3;
}) => PromiseLike<LanguageModelV3StreamResult>;
};
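
// Illustrative example (not part of the declarations): a middleware sketch that fills
// in a default temperature when the caller has not set one, leaving everything else
// untouched. The default value is arbitrary.
const defaultTemperatureMiddleware: LanguageModelV3Middleware = {
  specificationVersion: 'v3',
  async transformParams({ params }) {
    return { ...params, temperature: params.temperature ?? 0.3 };
  },
};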

/**
A tool has a name, a description, and a set of parameters.

Note: this is **not** the user-facing tool definition. The AI SDK methods will
map the user-facing tool definitions to this format.
*/
type LanguageModelV2FunctionTool = {
/**
The type of the tool (always 'function').
*/
type: 'function';
/**
The name of the tool. Unique within this model call.
*/
name: string;
/**
A description of the tool. The language model uses this to understand the
tool's purpose and to provide better completion suggestions.
*/
description?: string;
/**
The parameters that the tool expects. The language model uses this to
understand the tool's input requirements and to provide matching suggestions.
*/
inputSchema: JSONSchema7;
/**
The provider-specific options for the tool.
*/
providerOptions?: SharedV2ProviderOptions;
};

/**
Data content. Can be a Uint8Array, base64 encoded data as a string or a URL.
*/
type LanguageModelV2DataContent = Uint8Array | string | URL;

/**
A prompt is a list of messages.

Note: Not all models and prompt formats support multi-modal inputs and
tool calls. The validation happens at runtime.

Note: This is not a user-facing prompt. The AI SDK methods will map the
user-facing prompt types such as chat or instruction prompts to this format.
*/
type LanguageModelV2Prompt = Array<LanguageModelV2Message>;
type LanguageModelV2Message = ({
role: 'system';
content: string;
} | {
role: 'user';
content: Array<LanguageModelV2TextPart | LanguageModelV2FilePart>;
} | {
role: 'assistant';
content: Array<LanguageModelV2TextPart | LanguageModelV2FilePart | LanguageModelV2ReasoningPart | LanguageModelV2ToolCallPart | LanguageModelV2ToolResultPart>;
} | {
role: 'tool';
content: Array<LanguageModelV2ToolResultPart>;
}) & {
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
};
/**
Text content part of a prompt. It contains a string of text.
*/
interface LanguageModelV2TextPart {
type: 'text';
/**
The text content.
*/
text: string;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
}
/**
Reasoning content part of a prompt. It contains a string of reasoning text.
*/
interface LanguageModelV2ReasoningPart {
type: 'reasoning';
/**
The reasoning text.
*/
text: string;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
}
/**
File content part of a prompt. It contains a file.
*/
interface LanguageModelV2FilePart {
type: 'file';
/**
* Optional filename of the file.
*/
filename?: string;
/**
File data. Can be a Uint8Array, base64 encoded data as a string or a URL.
*/
data: LanguageModelV2DataContent;
/**
IANA media type of the file.

Can support wildcards, e.g. `image/*` (in which case the provider needs to take appropriate action).

@see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
mediaType: string;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
}
/**
Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
*/
interface LanguageModelV2ToolCallPart {
type: 'tool-call';
/**
ID of the tool call. This ID is used to match the tool call with the tool result.
*/
toolCallId: string;
/**
Name of the tool that is being called.
*/
toolName: string;
/**
Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
*/
input: unknown;
/**
* Whether the tool call will be executed by the provider.
* If this flag is not set or is false, the tool call will be executed by the client.
*/
providerExecuted?: boolean;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
}
/**
Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
*/
interface LanguageModelV2ToolResultPart {
type: 'tool-result';
/**
ID of the tool call that this result is associated with.
*/
toolCallId: string;
/**
Name of the tool that generated this result.
*/
toolName: string;
/**
Result of the tool call.
*/
output: LanguageModelV2ToolResultOutput;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
}
type LanguageModelV2ToolResultOutput = {
type: 'text';
value: string;
} | {
type: 'json';
value: JSONValue;
} | {
type: 'error-text';
value: string;
} | {
type: 'error-json';
value: JSONValue;
} | {
type: 'content';
value: Array<{
type: 'text';
/**
Text content.
*/
text: string;
} | {
type: 'media';
/**
Base-64 encoded media data.
*/
data: string;
/**
IANA media type.
@see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
mediaType: string;
}>;
};

/**
The configuration of a tool that is defined by the provider.
*/
type LanguageModelV2ProviderDefinedTool = {
/**
The type of the tool (always 'provider-defined').
*/
type: 'provider-defined';
/**
The ID of the tool. Should follow the format `<provider-name>.<unique-tool-name>`.
*/
id: `${string}.${string}`;
/**
The name of the tool that the user must use in the tool set.
*/
name: string;
/**
The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
*/
args: Record<string, unknown>;
};

type LanguageModelV2ToolChoice = {
type: 'auto';
} | {
type: 'none';
} | {
type: 'required';
} | {
type: 'tool';
toolName: string;
};

type LanguageModelV2CallOptions = {
/**
A language model prompt is a standardized prompt type.

Note: This is **not** the user-facing prompt. The AI SDK methods will map the
user-facing prompt types such as chat or instruction prompts to this format.
That approach allows us to evolve the user-facing prompts without breaking
the language model interface.
*/
prompt: LanguageModelV2Prompt;
/**
Maximum number of tokens to generate.
*/
maxOutputTokens?: number;
/**
Temperature setting. The range depends on the provider and model.
*/
temperature?: number;
/**
Stop sequences.
If set, the model will stop generating text when one of the stop sequences is generated.
Providers may have limits on the number of stop sequences.
*/
stopSequences?: string[];
/**
Nucleus sampling.
*/
topP?: number;
/**
Only sample from the top K options for each subsequent token.

Used to remove "long tail" low probability responses.
Recommended for advanced use cases only. You usually only need to use temperature.
*/
topK?: number;
/**
Presence penalty setting. It affects the likelihood of the model to
repeat information that is already in the prompt.
*/
presencePenalty?: number;
/**
Frequency penalty setting. It affects the likelihood of the model
to repeatedly use the same words or phrases.
*/
frequencyPenalty?: number;
/**
Response format. The output can either be text or JSON. Default is text.

If JSON is selected, a schema can optionally be provided to guide the LLM.
*/
responseFormat?: {
type: 'text';
} | {
type: 'json';
/**
* JSON schema that the generated output should conform to.
*/
schema?: JSONSchema7;
/**
* Name of output that should be generated. Used by some providers for additional LLM guidance.
*/
name?: string;
/**
* Description of the output that should be generated. Used by some providers for additional LLM guidance.
*/
description?: string;
};
/**
The seed (integer) to use for random sampling. If set and supported
by the model, calls will generate deterministic results.
*/
seed?: number;
/**
The tools that are available for the model.
*/
tools?: Array<LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool>;
/**
Specifies how the tool should be selected. Defaults to 'auto'.
*/
toolChoice?: LanguageModelV2ToolChoice;
/**
Include raw chunks in the stream. Only applicable for streaming calls.
*/
includeRawChunks?: boolean;
/**
Abort signal for cancelling the operation.
*/
abortSignal?: AbortSignal;
/**
Additional HTTP headers to be sent with the request.
Only applicable for HTTP-based providers.
*/
headers?: Record<string, string | undefined>;
/**
* Additional provider-specific options. They are passed through
* to the provider from the AI SDK and enable provider-specific
* functionality that can be fully encapsulated in the provider.
*/
providerOptions?: SharedV2ProviderOptions;
};

/**
Warning from the model provider for this call. The call will proceed, but e.g.
some settings might not be supported, which can lead to suboptimal results.
*/
type LanguageModelV2CallWarning = {
type: 'unsupported-setting';
setting: Omit<keyof LanguageModelV2CallOptions, 'prompt'>;
details?: string;
} | {
type: 'unsupported-tool';
tool: LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool;
details?: string;
} | {
type: 'other';
message: string;
};
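
// Illustrative example (not part of the declarations): the warning a provider could
// report when it ignores an unsupported setting such as topK.
const exampleWarning: LanguageModelV2CallWarning = {
  type: 'unsupported-setting',
  setting: 'topK',
  details: 'topK is not supported by this provider and was ignored.',
};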

/**
A file that has been generated by the model.
Generated files as base64 encoded strings or binary data.
The files should be returned without any unnecessary conversion.
*/
type LanguageModelV2File = {
type: 'file';
/**
The IANA media type of the file, e.g. `image/png` or `audio/mp3`.

@see https://www.iana.org/assignments/media-types/media-types.xhtml
*/
mediaType: string;
/**
Generated file data as base64 encoded strings or binary data.

The file data should be returned without any unnecessary conversion.
If the API returns base64 encoded strings, the file data should be returned
as base64 encoded strings. If the API returns binary data, the file data should
be returned as binary data.
*/
data: string | Uint8Array;
};

/**
Reasoning that the model has generated.
*/
type LanguageModelV2Reasoning = {
type: 'reasoning';
text: string;
/**
* Optional provider-specific metadata for the reasoning part.
*/
providerMetadata?: SharedV2ProviderMetadata;
};

/**
A source that has been used as input to generate the response.
*/
type LanguageModelV2Source = {
type: 'source';
/**
* The type of source - URL sources reference web content.
*/
sourceType: 'url';
/**
* The ID of the source.
*/
id: string;
/**
* The URL of the source.
*/
url: string;
/**
* The title of the source.
*/
title?: string;
/**
* Additional provider metadata for the source.
*/
providerMetadata?: SharedV2ProviderMetadata;
} | {
type: 'source';
/**
* The type of source - document sources reference files/documents.
*/
sourceType: 'document';
/**
* The ID of the source.
*/
id: string;
/**
* IANA media type of the document (e.g., 'application/pdf').
*/
mediaType: string;
/**
* The title of the document.
*/
title: string;
/**
* Optional filename of the document.
*/
filename?: string;
/**
* Additional provider metadata for the source.
*/
providerMetadata?: SharedV2ProviderMetadata;
};

/**
Text that the model has generated.
*/
type LanguageModelV2Text = {
type: 'text';
/**
The text content.
*/
text: string;
providerMetadata?: SharedV2ProviderMetadata;
};

/**
* Tool calls that the model has generated.
*/
type LanguageModelV2ToolCall = {
type: 'tool-call';
/**
* The identifier of the tool call. It must be unique across all tool calls.
*/
toolCallId: string;
/**
* The name of the tool that should be called.
*/
toolName: string;
/**
* Stringified JSON object with the tool call arguments. Must match the
* parameters schema of the tool.
*/
input: string;
/**
* Whether the tool call will be executed by the provider.
* If this flag is not set or is false, the tool call will be executed by the client.
*/
providerExecuted?: boolean;
/**
* Additional provider-specific metadata for the tool call.
*/
providerMetadata?: SharedV2ProviderMetadata;
};

/**
Result of a tool call that has been executed by the provider.
*/
type LanguageModelV2ToolResult = {
type: 'tool-result';
/**
* The ID of the tool call that this result is associated with.
*/
toolCallId: string;
/**
* Name of the tool that generated this result.
*/
toolName: string;
/**
* Result of the tool call. This is a JSON-serializable object.
*/
result: unknown;
/**
* Optional flag if the result is an error or an error message.
*/
isError?: boolean;
/**
* Whether the tool result was generated by the provider.
* If this flag is set to true, the tool result was generated by the provider.
* If this flag is not set or is false, the tool result was generated by the client.
*/
providerExecuted?: boolean;
/**
* Additional provider-specific metadata for the tool result.
*/
providerMetadata?: SharedV2ProviderMetadata;
};

type LanguageModelV2Content = LanguageModelV2Text | LanguageModelV2Reasoning | LanguageModelV2File | LanguageModelV2Source | LanguageModelV2ToolCall | LanguageModelV2ToolResult;

/**
Reason why a language model finished generating a response.

Can be one of the following:
- `stop`: model generated stop sequence
- `length`: model generated maximum number of tokens
- `content-filter`: content filter violation stopped the model
- `tool-calls`: model triggered tool calls
- `error`: model stopped because of an error
- `other`: model stopped for other reasons
- `unknown`: the model has not transmitted a finish reason
*/
type LanguageModelV2FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown';

interface LanguageModelV2ResponseMetadata {
/**
ID for the generated response, if the provider sends one.
*/
id?: string;
/**
Timestamp for the start of the generated response, if the provider sends one.
*/
timestamp?: Date;
/**
The ID of the response model that was used to generate the response, if the provider sends one.
*/
modelId?: string;
}

/**
Usage information for a language model call.

If your API returns additional usage information, you can add it to the
provider metadata under your provider's key.
*/
type LanguageModelV2Usage = {
/**
The number of input (prompt) tokens used.
*/
inputTokens: number | undefined;
/**
The number of output (completion) tokens used.
*/
outputTokens: number | undefined;
/**
The total number of tokens as reported by the provider.
This number might be different from the sum of `inputTokens` and `outputTokens`
and e.g. include reasoning tokens or other overhead.
*/
totalTokens: number | undefined;
/**
The number of reasoning tokens used.
*/
reasoningTokens?: number | undefined;
/**
The number of cached input tokens.
*/
cachedInputTokens?: number | undefined;
};
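
// Illustrative example (not part of the declarations): a V2 usage record where the
// reported total includes reasoning tokens on top of input and output tokens.
const exampleUsage: LanguageModelV2Usage = {
  inputTokens: 120,
  outputTokens: 48,
  totalTokens: 200,
  reasoningTokens: 32,
  cachedInputTokens: 0,
};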

type LanguageModelV2StreamPart = {
    type: 'text-start';
    providerMetadata?: SharedV2ProviderMetadata;
    id: string;
} | {
    type: 'text-delta';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
    delta: string;
} | {
    type: 'text-end';
    providerMetadata?: SharedV2ProviderMetadata;
    id: string;
} | {
    type: 'reasoning-start';
    providerMetadata?: SharedV2ProviderMetadata;
    id: string;
} | {
    type: 'reasoning-delta';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
    delta: string;
} | {
    type: 'reasoning-end';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'tool-input-start';
    id: string;
    toolName: string;
    providerMetadata?: SharedV2ProviderMetadata;
    providerExecuted?: boolean;
} | {
    type: 'tool-input-delta';
    id: string;
    delta: string;
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'tool-input-end';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
} | LanguageModelV2ToolCall | LanguageModelV2ToolResult | LanguageModelV2File | LanguageModelV2Source | {
    type: 'stream-start';
    warnings: Array<LanguageModelV2CallWarning>;
} | ({
    type: 'response-metadata';
} & LanguageModelV2ResponseMetadata) | {
    type: 'finish';
    usage: LanguageModelV2Usage;
    finishReason: LanguageModelV2FinishReason;
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'raw';
    rawValue: unknown;
} | {
    type: 'error';
    error: unknown;
};
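As an illustration of the part ordering, here is a sketch of the stream a provider could emit for a short text-only completion; the ids, model id, and token counts are invented and not taken from any real provider.

```ts
import type { LanguageModelV2StreamPart } from '@ai-sdk/provider';

// stream-start -> response-metadata -> text-start/delta/end -> finish
const parts: LanguageModelV2StreamPart[] = [
  { type: 'stream-start', warnings: [] },
  { type: 'response-metadata', id: 'resp_123', modelId: 'example-model', timestamp: new Date() },
  { type: 'text-start', id: 'txt_1' },
  { type: 'text-delta', id: 'txt_1', delta: 'Hello, ' },
  { type: 'text-delta', id: 'txt_1', delta: 'world!' },
  { type: 'text-end', id: 'txt_1' },
  {
    type: 'finish',
    finishReason: 'stop',
    usage: { inputTokens: 10, outputTokens: 3, totalTokens: 13 },
  },
];
```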

/**
Specification for a language model that implements the language model interface version 2.
 */
type LanguageModelV2 = {
    /**
    The language model must specify which language model interface version it implements.
     */
    readonly specificationVersion: 'v2';
    /**
    Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
    Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
    Supported URL patterns by media type for the provider.

    The keys are media type patterns or full media types (e.g. `*\/*` for everything, `audio/*`, `video/*`, or `application/pdf`),
    and the values are arrays of regular expressions that match the URL paths.

    The matching should be against lower-case URLs.

    Matched URLs are supported natively by the model and are not downloaded.

    @returns A map of supported URL patterns by media type (as a promise or a plain object).
     */
    supportedUrls: PromiseLike<Record<string, RegExp[]>> | Record<string, RegExp[]>;
    /**
    Generates a language model output (non-streaming).

    Naming: "do" prefix to prevent accidental direct usage of the method
    by the user.
     */
    doGenerate(options: LanguageModelV2CallOptions): PromiseLike<{
        /**
        Ordered content that the model has generated.
         */
        content: Array<LanguageModelV2Content>;
        /**
        Finish reason.
         */
        finishReason: LanguageModelV2FinishReason;
        /**
        Usage information.
         */
        usage: LanguageModelV2Usage;
        /**
        Additional provider-specific metadata. They are passed through
        from the provider to the AI SDK and enable provider-specific
        results that can be fully encapsulated in the provider.
         */
        providerMetadata?: SharedV2ProviderMetadata;
        /**
        Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
            Request HTTP body that was sent to the provider API.
             */
            body?: unknown;
        };
        /**
        Optional response information for telemetry and debugging purposes.
         */
        response?: LanguageModelV2ResponseMetadata & {
            /**
            Response headers.
             */
            headers?: SharedV2Headers;
            /**
            Response HTTP body.
             */
            body?: unknown;
        };
        /**
        Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<LanguageModelV2CallWarning>;
    }>;
    /**
    Generates a language model output (streaming).

    Naming: "do" prefix to prevent accidental direct usage of the method
    by the user.

    @return A stream of higher-level language model output parts.
     */
    doStream(options: LanguageModelV2CallOptions): PromiseLike<{
        stream: ReadableStream<LanguageModelV2StreamPart>;
        /**
        Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
            Request HTTP body that was sent to the provider API.
             */
            body?: unknown;
        };
        /**
        Optional response data.
         */
        response?: {
            /**
            Response headers.
             */
            headers?: SharedV2Headers;
        };
    }>;
};
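The following is a minimal sketch of an object satisfying this interface, assuming nothing beyond the fields declared above; it returns empty content and a trivial stream, so it is only useful for seeing where the required pieces go. The provider and model names are placeholders.

```ts
import type {
  LanguageModelV2,
  LanguageModelV2StreamPart,
} from '@ai-sdk/provider';

// A toy model with no real generation logic; it only illustrates the shape of
// the doGenerate / doStream results required by the interface.
const echoModel: LanguageModelV2 = {
  specificationVersion: 'v2',
  provider: 'example-provider', // hypothetical provider name
  modelId: 'echo-1',            // hypothetical model id
  supportedUrls: {},            // no natively supported URLs

  async doGenerate() {
    return {
      content: [],
      finishReason: 'stop',
      usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
      warnings: [],
    };
  },

  async doStream() {
    const parts: LanguageModelV2StreamPart[] = [
      { type: 'stream-start', warnings: [] },
      {
        type: 'finish',
        finishReason: 'stop',
        usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
      },
    ];
    return {
      stream: new ReadableStream<LanguageModelV2StreamPart>({
        start(controller) {
          for (const part of parts) controller.enqueue(part);
          controller.close();
        },
      }),
    };
  },
};
```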

/**
 * Experimental middleware for LanguageModelV2.
 * This type defines the structure for middleware that can be used to modify
 * the behavior of LanguageModelV2 operations.
 */
type LanguageModelV2Middleware = {
    /**
     * Middleware specification version. Use `v2` for the current version.
     */
    middlewareVersion?: 'v2' | undefined;
    /**
     * Override the provider name if desired.
     * @param options.model - The language model instance.
     */
    overrideProvider?: (options: {
        model: LanguageModelV2;
    }) => string;
    /**
     * Override the model ID if desired.
     * @param options.model - The language model instance.
     */
    overrideModelId?: (options: {
        model: LanguageModelV2;
    }) => string;
    /**
     * Override the supported URLs if desired.
     * @param options.model - The language model instance.
     */
    overrideSupportedUrls?: (options: {
        model: LanguageModelV2;
    }) => PromiseLike<Record<string, RegExp[]>> | Record<string, RegExp[]>;
    /**
     * Transforms the parameters before they are passed to the language model.
     * @param options - Object containing the type of operation and the parameters.
     * @param options.type - The type of operation ('generate' or 'stream').
     * @param options.params - The original parameters for the language model call.
     * @returns A promise that resolves to the transformed parameters.
     */
    transformParams?: (options: {
        type: 'generate' | 'stream';
        params: LanguageModelV2CallOptions;
        model: LanguageModelV2;
    }) => PromiseLike<LanguageModelV2CallOptions>;
    /**
     * Wraps the generate operation of the language model.
     * @param options - Object containing the generate function, parameters, and model.
     * @param options.doGenerate - The original generate function.
     * @param options.doStream - The original stream function.
     * @param options.params - The parameters for the generate call. If the
     * `transformParams` middleware is used, this will be the transformed parameters.
     * @param options.model - The language model instance.
     * @returns A promise that resolves to the result of the generate operation.
     */
    wrapGenerate?: (options: {
        doGenerate: () => ReturnType<LanguageModelV2['doGenerate']>;
        doStream: () => ReturnType<LanguageModelV2['doStream']>;
        params: LanguageModelV2CallOptions;
        model: LanguageModelV2;
    }) => Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
    /**
     * Wraps the stream operation of the language model.
     *
     * @param options - Object containing the stream function, parameters, and model.
     * @param options.doGenerate - The original generate function.
     * @param options.doStream - The original stream function.
     * @param options.params - The parameters for the stream call. If the
     * `transformParams` middleware is used, this will be the transformed parameters.
     * @param options.model - The language model instance.
     * @returns A promise that resolves to the result of the stream operation.
     */
    wrapStream?: (options: {
        doGenerate: () => ReturnType<LanguageModelV2['doGenerate']>;
        doStream: () => ReturnType<LanguageModelV2['doStream']>;
        params: LanguageModelV2CallOptions;
        model: LanguageModelV2;
    }) => PromiseLike<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
};
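A small sketch of a middleware that only wraps `doGenerate` to log token usage and passes everything else through unchanged; how the middleware is applied to a model is left to the consuming SDK and is not shown here.

```ts
import type { LanguageModelV2Middleware } from '@ai-sdk/provider';

// Logs total token usage after every non-streaming call.
const usageLoggingMiddleware: LanguageModelV2Middleware = {
  middlewareVersion: 'v2',
  wrapGenerate: async ({ doGenerate, model }) => {
    const result = await doGenerate();
    console.log(
      `${model.provider}/${model.modelId} used`,
      result.usage.totalTokens ?? 'unknown',
      'tokens',
    );
    return result;
  },
};
```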

/**
 * Middleware for EmbeddingModelV3.
 * This type defines the structure for middleware that can be used to modify
 * the behavior of EmbeddingModelV3 operations.
 */
type EmbeddingModelV3Middleware = {
    /**
     * Middleware specification version. Use `v3` for the current version.
     */
    readonly specificationVersion: 'v3';
    /**
     * Override the provider name if desired.
     * @param options.model - The embedding model instance.
     */
    overrideProvider?: (options: {
        model: EmbeddingModelV3;
    }) => string;
    /**
     * Override the model ID if desired.
     * @param options.model - The embedding model instance.
     */
    overrideModelId?: (options: {
        model: EmbeddingModelV3;
    }) => string;
    /**
     * Override the limit of how many embeddings can be generated in a single API call if desired.
     * @param options.model - The embedding model instance.
     */
    overrideMaxEmbeddingsPerCall?: (options: {
        model: EmbeddingModelV3;
    }) => PromiseLike<number | undefined> | number | undefined;
    /**
     * Override support for handling multiple embedding calls in parallel, if desired.
     * @param options.model - The embedding model instance.
     */
    overrideSupportsParallelCalls?: (options: {
        model: EmbeddingModelV3;
    }) => PromiseLike<boolean> | boolean;
    /**
     * Transforms the parameters before they are passed to the embedding model.
     * @param options - Object containing the parameters and the model.
     * @param options.params - The original parameters for the embedding model call.
     * @returns A promise that resolves to the transformed parameters.
     */
    transformParams?: (options: {
        params: EmbeddingModelV3CallOptions;
        model: EmbeddingModelV3;
    }) => PromiseLike<EmbeddingModelV3CallOptions>;
    /**
     * Wraps the embed operation of the embedding model.
     *
     * @param options - Object containing the embed function, parameters, and model.
     * @param options.doEmbed - The original embed function.
     * @param options.params - The parameters for the embed call. If the
     * `transformParams` middleware is used, this will be the transformed parameters.
     * @param options.model - The embedding model instance.
     * @returns A promise that resolves to the result of the embed operation.
     */
    wrapEmbed?: (options: {
        doEmbed: () => ReturnType<EmbeddingModelV3['doEmbed']>;
        params: EmbeddingModelV3CallOptions;
        model: EmbeddingModelV3;
    }) => Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
};
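A sketch of an embedding middleware that caps the batch size and times each call; the 16-value limit is an arbitrary example, and the result of `doEmbed` is passed through untouched.

```ts
import type { EmbeddingModelV3Middleware } from '@ai-sdk/provider';

const batchLimitMiddleware: EmbeddingModelV3Middleware = {
  specificationVersion: 'v3',
  // Cap the number of values sent to the provider in a single call.
  overrideMaxEmbeddingsPerCall: () => 16,
  // Time each embed call without touching its parameters or result.
  wrapEmbed: async ({ doEmbed, model }) => {
    const start = Date.now();
    const result = await doEmbed();
    console.log(`${model.provider}/${model.modelId} embed took ${Date.now() - start}ms`);
    return result;
  },
};
```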

type RerankingModelV3CallOptions = {
    /**
     * Documents to rerank.
     * Either a list of texts or a list of JSON objects.
     */
    documents: {
        type: 'text';
        values: string[];
    } | {
        type: 'object';
        values: JSONObject[];
    };
    /**
     * The query to rerank the documents against.
     */
    query: string;
    /**
     * Optionally limits the returned documents to the top n documents.
     */
    topN?: number;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
     * Additional provider-specific options. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerOptions?: SharedV3ProviderOptions;
    /**
     * Additional HTTP headers to be sent with the request.
     * Only applicable for HTTP-based providers.
     */
    headers?: SharedV3Headers;
};

/**
 * Specification for a reranking model that implements the reranking model interface version 3.
 */
type RerankingModelV3 = {
    /**
     * The reranking model must specify which reranking model interface version it implements.
     */
    readonly specificationVersion: 'v3';
    /**
     * Provider ID.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID.
     */
    readonly modelId: string;
    /**
     * Reranks a list of documents against the query.
     */
    doRerank(options: RerankingModelV3CallOptions): PromiseLike<{
        /**
         * Ordered list of reranked documents, referenced by their index in the original list.
         * The documents are sorted in descending order of relevance score.
         */
        ranking: Array<{
            /**
             * The index of the document in the original list of documents before reranking.
             */
            index: number;
            /**
             * The relevance score of the document after reranking.
             */
            relevanceScore: number;
        }>;
        /**
         * Additional provider-specific metadata. It is passed through
         * from the provider to the AI SDK and enables provider-specific
         * results that can be fully encapsulated in the provider.
         */
        providerMetadata?: SharedV3ProviderMetadata;
        /**
         * Warnings for the call, e.g. unsupported settings.
         */
        warnings?: Array<SharedV3Warning>;
        /**
         * Optional response information for debugging purposes.
         */
        response?: {
            /**
             * ID for the generated response, if the provider sends one.
             */
            id?: string;
            /**
             * Timestamp for the start of the generated response, if the provider sends one.
             */
            timestamp?: Date;
            /**
             * The ID of the response model that was used to generate the response, if the provider sends one.
             */
            modelId?: string;
            /**
             * Response headers.
             */
            headers?: SharedV3Headers;
            /**
             * Response body.
             */
            body?: unknown;
        };
    }>;
};
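A hedged usage sketch: reranking three hypothetical text documents against a query and printing them in relevance order. The documents and query are invented.

```ts
import type { RerankingModelV3 } from '@ai-sdk/provider';

// Reranks a few text documents and prints them by descending relevance.
async function rankDocuments(model: RerankingModelV3) {
  const documents = ['pricing page', 'API reference', 'status page'];

  const { ranking } = await model.doRerank({
    documents: { type: 'text', values: documents },
    query: 'How do I authenticate API requests?',
    topN: 2,
  });

  for (const { index, relevanceScore } of ranking) {
    console.log(relevanceScore.toFixed(3), documents[index]);
  }
}
```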

type SpeechModelV3ProviderOptions = Record<string, JSONObject>;
type SpeechModelV3CallOptions = {
    /**
     * Text to convert to speech.
     */
    text: string;
    /**
     * The voice to use for speech synthesis.
     * This is provider-specific and may be a voice ID, name, or other identifier.
     */
    voice?: string;
    /**
     * The desired output format for the audio, e.g. "mp3", "wav", etc.
     */
    outputFormat?: string;
    /**
     * Instructions for the speech generation, e.g. "Speak in a slow and steady tone".
     */
    instructions?: string;
    /**
     * The speed of the speech generation.
     */
    speed?: number;
    /**
     * The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
     * or "auto" for automatic language detection. Provider support varies.
     */
    language?: string;
    /**
     * Additional provider-specific options that are passed through to the provider
     * as body parameters.
     *
     * The outer record is keyed by the provider name, and the inner
     * record is keyed by the provider-specific metadata key.
     * ```ts
     * {
     *   "openai": {}
     * }
     * ```
     */
    providerOptions?: SpeechModelV3ProviderOptions;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
     * Additional HTTP headers to be sent with the request.
     * Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
 * Speech model specification version 3.
 */
type SpeechModelV3 = {
    /**
     * The speech model must specify which speech model interface
     * version it implements. This will allow us to evolve the speech
     * model interface and retain backwards compatibility. The different
     * implementation versions can be handled as a discriminated union
     * on our side.
     */
    readonly specificationVersion: 'v3';
    /**
     * Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
     * Generates speech audio from text.
     */
    doGenerate(options: SpeechModelV3CallOptions): PromiseLike<{
        /**
         * Generated audio as a base64 encoded string or binary data.
         * The audio should be returned without any unnecessary conversion.
         * If the API returns base64 encoded strings, the audio should be returned
         * as base64 encoded strings. If the API returns binary data, the audio
         * should be returned as binary data.
         */
        audio: string | Uint8Array;
        /**
         * Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<SharedV3Warning>;
        /**
         * Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
             * Request body (available only for providers that use HTTP requests).
             */
            body?: unknown;
        };
        /**
         * Response information for telemetry and debugging purposes.
         */
        response: {
            /**
             * Timestamp for the start of the generated response.
             */
            timestamp: Date;
            /**
             * The ID of the response model that was used to generate the response.
             */
            modelId: string;
            /**
             * Response headers.
             */
            headers?: SharedV2Headers;
            /**
             * Response body.
             */
            body?: unknown;
        };
        /**
         * Additional provider-specific metadata. They are passed through
         * from the provider to the AI SDK and enable provider-specific
         * results that can be fully encapsulated in the provider.
         */
        providerMetadata?: Record<string, JSONObject>;
    }>;
};

type TranscriptionModelV3ProviderOptions = Record<string, JSONObject>;
type TranscriptionModelV3CallOptions = {
    /**
    Audio data to transcribe.
    Accepts a `Uint8Array` or `string`, where `string` is a base64 encoded audio file.
     */
    audio: Uint8Array | string;
    /**
    The IANA media type of the audio data.

    @see https://www.iana.org/assignments/media-types/media-types.xhtml
     */
    mediaType: string;
    /**
    Additional provider-specific options that are passed through to the provider
    as body parameters.

    The outer record is keyed by the provider name, and the inner
    record is keyed by the provider-specific metadata key.
    ```ts
    {
      "openai": {
        "timestampGranularities": ["word"]
      }
    }
    ```
     */
    providerOptions?: TranscriptionModelV3ProviderOptions;
    /**
    Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
    Additional HTTP headers to be sent with the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
Transcription model specification version 3.
 */
type TranscriptionModelV3 = {
    /**
    The transcription model must specify which transcription model interface
    version it implements. This will allow us to evolve the transcription
    model interface and retain backwards compatibility. The different
    implementation versions can be handled as a discriminated union
    on our side.
     */
    readonly specificationVersion: 'v3';
    /**
    Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
    Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
    Generates a transcript.
     */
    doGenerate(options: TranscriptionModelV3CallOptions): PromiseLike<{
        /**
         * The complete transcribed text from the audio.
         */
        text: string;
        /**
         * Array of transcript segments with timing information.
         * Each segment represents a portion of the transcribed text with start and end times.
         */
        segments: Array<{
            /**
             * The text content of this segment.
             */
            text: string;
            /**
             * The start time of this segment in seconds.
             */
            startSecond: number;
            /**
             * The end time of this segment in seconds.
             */
            endSecond: number;
        }>;
        /**
         * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
         * May be undefined if the language couldn't be detected.
         */
        language: string | undefined;
        /**
         * The total duration of the audio file in seconds.
         * May be undefined if the duration couldn't be determined.
         */
        durationInSeconds: number | undefined;
        /**
        Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<SharedV3Warning>;
        /**
        Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
            Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
            Non-HTTP(s) providers should not set this.
             */
            body?: string;
        };
        /**
        Response information for telemetry and debugging purposes.
         */
        response: {
            /**
            Timestamp for the start of the generated response.
             */
            timestamp: Date;
            /**
            The ID of the response model that was used to generate the response.
             */
            modelId: string;
            /**
            Response headers.
             */
            headers?: SharedV3Headers;
            /**
            Response body.
             */
            body?: unknown;
        };
        /**
        Additional provider-specific metadata. They are passed through
        from the provider to the AI SDK and enable provider-specific
        results that can be fully encapsulated in the provider.
         */
        providerMetadata?: Record<string, JSONObject>;
    }>;
};
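A usage sketch for a transcription model, assuming the audio is read from a local MP3 file via Node's `fs/promises`; the model instance and file path are supplied by the caller.

```ts
import { readFile } from 'node:fs/promises';
import type { TranscriptionModelV3 } from '@ai-sdk/provider';

// Transcribes a local MP3 file and prints each segment with its timing.
async function transcribe(model: TranscriptionModelV3, path: string) {
  const result = await model.doGenerate({
    audio: await readFile(path), // Uint8Array with the raw audio bytes
    mediaType: 'audio/mpeg',
  });

  console.log(`Detected language: ${result.language ?? 'unknown'}`);
  for (const segment of result.segments) {
    console.log(`[${segment.startSecond}s - ${segment.endSecond}s] ${segment.text}`);
  }
}
```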

/**
 * Provider for language, text embedding, image, transcription, speech, and reranking models.
 */
interface ProviderV3 {
    readonly specificationVersion: 'v3';
    /**
    Returns the language model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {LanguageModel} The language model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    languageModel(modelId: string): LanguageModelV3;
    /**
    Returns the text embedding model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {EmbeddingModel} The embedding model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    embeddingModel(modelId: string): EmbeddingModelV3;
    /**
    Returns the text embedding model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {EmbeddingModel} The embedding model associated with the id

    @throws {NoSuchModelError} If no such model exists.

    @deprecated Use `embeddingModel` instead.
     */
    textEmbeddingModel?(modelId: string): EmbeddingModelV3;
    /**
    Returns the image model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {ImageModel} The image model associated with the id
     */
    imageModel(modelId: string): ImageModelV3;
    /**
    Returns the transcription model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {TranscriptionModel} The transcription model associated with the id
     */
    transcriptionModel?(modelId: string): TranscriptionModelV3;
    /**
    Returns the speech model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {SpeechModel} The speech model associated with the id
     */
    speechModel?(modelId: string): SpeechModelV3;
    /**
    Returns the reranking model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {RerankingModel} The reranking model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    rerankingModel?(modelId: string): RerankingModelV3;
}
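A sketch of a provider that exposes a single language model; the error handling is simplified to plain `Error`s, where a real provider would typically throw the package's `NoSuchModelError`. The factory function name is invented for illustration.

```ts
import type { LanguageModelV3, ProviderV3 } from '@ai-sdk/provider';

// Wraps one language model behind the ProviderV3 interface.
function createSingleModelProvider(model: LanguageModelV3): ProviderV3 {
  return {
    specificationVersion: 'v3',
    languageModel(modelId) {
      if (modelId !== model.modelId) {
        throw new Error(`No such language model: ${modelId}`);
      }
      return model;
    },
    embeddingModel(modelId) {
      throw new Error(`No such embedding model: ${modelId}`);
    },
    imageModel(modelId) {
      throw new Error(`No such image model: ${modelId}`);
    },
  };
}
```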

type SpeechModelV2ProviderOptions = Record<string, Record<string, JSONValue>>;
type SpeechModelV2CallOptions = {
    /**
     * Text to convert to speech.
     */
    text: string;
    /**
     * The voice to use for speech synthesis.
     * This is provider-specific and may be a voice ID, name, or other identifier.
     */
    voice?: string;
    /**
     * The desired output format for the audio, e.g. "mp3", "wav", etc.
     */
    outputFormat?: string;
    /**
     * Instructions for the speech generation, e.g. "Speak in a slow and steady tone".
     */
    instructions?: string;
    /**
     * The speed of the speech generation.
     */
    speed?: number;
    /**
     * The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
     * or "auto" for automatic language detection. Provider support varies.
     */
    language?: string;
    /**
     * Additional provider-specific options that are passed through to the provider
     * as body parameters.
     *
     * The outer record is keyed by the provider name, and the inner
     * record is keyed by the provider-specific metadata key.
     * ```ts
     * {
     *   "openai": {}
     * }
     * ```
     */
    providerOptions?: SpeechModelV2ProviderOptions;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
     * Additional HTTP headers to be sent with the request.
     * Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
type SpeechModelV2CallWarning = {
    type: 'unsupported-setting';
    setting: keyof SpeechModelV2CallOptions;
    details?: string;
} | {
    type: 'other';
    message: string;
};

/**
 * Speech model specification version 2.
 */
type SpeechModelV2 = {
    /**
     * The speech model must specify which speech model interface
     * version it implements. This will allow us to evolve the speech
     * model interface and retain backwards compatibility. The different
     * implementation versions can be handled as a discriminated union
     * on our side.
     */
    readonly specificationVersion: 'v2';
    /**
     * Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
     * Generates speech audio from text.
     */
    doGenerate(options: SpeechModelV2CallOptions): PromiseLike<{
        /**
         * Generated audio as a base64 encoded string or binary data.
         * The audio should be returned without any unnecessary conversion.
         * If the API returns base64 encoded strings, the audio should be returned
         * as base64 encoded strings. If the API returns binary data, the audio
         * should be returned as binary data.
         */
        audio: string | Uint8Array;
        /**
         * Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<SpeechModelV2CallWarning>;
        /**
         * Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
             * Request body (available only for providers that use HTTP requests).
             */
            body?: unknown;
        };
        /**
         * Response information for telemetry and debugging purposes.
         */
        response: {
            /**
             * Timestamp for the start of the generated response.
             */
            timestamp: Date;
            /**
             * The ID of the response model that was used to generate the response.
             */
            modelId: string;
            /**
             * Response headers.
             */
            headers?: SharedV2Headers;
            /**
             * Response body.
             */
            body?: unknown;
        };
        /**
         * Additional provider-specific metadata. They are passed through
         * from the provider to the AI SDK and enable provider-specific
         * results that can be fully encapsulated in the provider.
         */
        providerMetadata?: Record<string, Record<string, JSONValue>>;
    }>;
};

type TranscriptionModelV2ProviderOptions = Record<string, Record<string, JSONValue>>;
type TranscriptionModelV2CallOptions = {
    /**
    Audio data to transcribe.
    Accepts a `Uint8Array` or `string`, where `string` is a base64 encoded audio file.
     */
    audio: Uint8Array | string;
    /**
    The IANA media type of the audio data.

    @see https://www.iana.org/assignments/media-types/media-types.xhtml
     */
    mediaType: string;
    /**
    Additional provider-specific options that are passed through to the provider
    as body parameters.

    The outer record is keyed by the provider name, and the inner
    record is keyed by the provider-specific metadata key.
    ```ts
    {
      "openai": {
        "timestampGranularities": ["word"]
      }
    }
    ```
     */
    providerOptions?: TranscriptionModelV2ProviderOptions;
    /**
    Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
    Additional HTTP headers to be sent with the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
Warning from the model provider for this call. The call will proceed, but e.g.
some settings might not be supported, which can lead to suboptimal results.
 */
type TranscriptionModelV2CallWarning = {
    type: 'unsupported-setting';
    setting: keyof TranscriptionModelV2CallOptions;
    details?: string;
} | {
    type: 'other';
    message: string;
};

/**
Transcription model specification version 2.
 */
type TranscriptionModelV2 = {
    /**
    The transcription model must specify which transcription model interface
    version it implements. This will allow us to evolve the transcription
    model interface and retain backwards compatibility. The different
    implementation versions can be handled as a discriminated union
    on our side.
     */
    readonly specificationVersion: 'v2';
    /**
    Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
    Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
    Generates a transcript.
     */
    doGenerate(options: TranscriptionModelV2CallOptions): PromiseLike<{
        /**
         * The complete transcribed text from the audio.
         */
        text: string;
        /**
         * Array of transcript segments with timing information.
         * Each segment represents a portion of the transcribed text with start and end times.
         */
        segments: Array<{
            /**
             * The text content of this segment.
             */
            text: string;
            /**
             * The start time of this segment in seconds.
             */
            startSecond: number;
            /**
             * The end time of this segment in seconds.
             */
            endSecond: number;
        }>;
        /**
         * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
         * May be undefined if the language couldn't be detected.
         */
        language: string | undefined;
        /**
         * The total duration of the audio file in seconds.
         * May be undefined if the duration couldn't be determined.
         */
        durationInSeconds: number | undefined;
        /**
        Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<TranscriptionModelV2CallWarning>;
        /**
        Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
            Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
            Non-HTTP(s) providers should not set this.
             */
            body?: string;
        };
        /**
        Response information for telemetry and debugging purposes.
         */
        response: {
            /**
            Timestamp for the start of the generated response.
             */
            timestamp: Date;
            /**
            The ID of the response model that was used to generate the response.
             */
            modelId: string;
            /**
            Response headers.
             */
            headers?: SharedV2Headers;
            /**
            Response body.
             */
            body?: unknown;
        };
        /**
        Additional provider-specific metadata. They are passed through
        from the provider to the AI SDK and enable provider-specific
        results that can be fully encapsulated in the provider.
         */
        providerMetadata?: Record<string, Record<string, JSONValue>>;
    }>;
};

/**
 * Provider for language, text embedding, image, transcription, and speech models.
 */
interface ProviderV2 {
    /**
    Returns the language model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {LanguageModel} The language model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    languageModel(modelId: string): LanguageModelV2;
    /**
    Returns the text embedding model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {EmbeddingModel} The embedding model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    textEmbeddingModel(modelId: string): EmbeddingModelV2<string>;
    /**
    Returns the image model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {ImageModel} The image model associated with the id
     */
    imageModel(modelId: string): ImageModelV2;
    /**
    Returns the transcription model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {TranscriptionModel} The transcription model associated with the id
     */
    transcriptionModel?(modelId: string): TranscriptionModelV2;
    /**
    Returns the speech model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {SpeechModel} The speech model associated with the id
     */
    speechModel?(modelId: string): SpeechModelV2;
}

export { AISDKError, APICallError, type EmbeddingModelV2, type EmbeddingModelV2Embedding, type EmbeddingModelV3, type EmbeddingModelV3CallOptions, type EmbeddingModelV3Embedding, type EmbeddingModelV3Middleware, type EmbeddingModelV3Result, EmptyResponseBodyError, type ImageModelV2, type ImageModelV2CallOptions, type ImageModelV2CallWarning, type ImageModelV2ProviderMetadata, type ImageModelV3, type ImageModelV3CallOptions, type ImageModelV3File, type ImageModelV3Middleware, type ImageModelV3ProviderMetadata, type ImageModelV3Usage, InvalidArgumentError, InvalidPromptError, InvalidResponseDataError, type JSONArray, type JSONObject, JSONParseError, type JSONValue, type LanguageModelV2, type LanguageModelV2CallOptions, type LanguageModelV2CallWarning, type LanguageModelV2Content, type LanguageModelV2DataContent, type LanguageModelV2File, type LanguageModelV2FilePart, type LanguageModelV2FinishReason, type LanguageModelV2FunctionTool, type LanguageModelV2Message, type LanguageModelV2Middleware, type LanguageModelV2Prompt, type LanguageModelV2ProviderDefinedTool, type LanguageModelV2Reasoning, type LanguageModelV2ReasoningPart, type LanguageModelV2ResponseMetadata, type LanguageModelV2Source, type LanguageModelV2StreamPart, type LanguageModelV2Text, type LanguageModelV2TextPart, type LanguageModelV2ToolCall, type LanguageModelV2ToolCallPart, type LanguageModelV2ToolChoice, type LanguageModelV2ToolResultOutput, type LanguageModelV2ToolResultPart, type LanguageModelV2Usage, type LanguageModelV3, type LanguageModelV3CallOptions, type LanguageModelV3Content, type LanguageModelV3DataContent, type LanguageModelV3File, type LanguageModelV3FilePart, type LanguageModelV3FinishReason, type LanguageModelV3FunctionTool, type LanguageModelV3GenerateResult, type LanguageModelV3Message, type LanguageModelV3Middleware, type LanguageModelV3Prompt, type LanguageModelV3ProviderTool, type LanguageModelV3Reasoning, type LanguageModelV3ReasoningPart, type LanguageModelV3ResponseMetadata, type LanguageModelV3Source, type LanguageModelV3StreamPart, type LanguageModelV3StreamResult, type LanguageModelV3Text, type LanguageModelV3TextPart, type LanguageModelV3ToolApprovalRequest, type LanguageModelV3ToolApprovalResponsePart, type LanguageModelV3ToolCall, type LanguageModelV3ToolCallPart, type LanguageModelV3ToolChoice, type LanguageModelV3ToolResult, type LanguageModelV3ToolResultOutput, type LanguageModelV3ToolResultPart, type LanguageModelV3Usage, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, type ProviderV2, type ProviderV3, type RerankingModelV3, type RerankingModelV3CallOptions, type SharedV2Headers, type SharedV2ProviderMetadata, type SharedV2ProviderOptions, type SharedV3Headers, type SharedV3ProviderMetadata, type SharedV3ProviderOptions, type SharedV3Warning, type SpeechModelV2, type SpeechModelV2CallOptions, type SpeechModelV2CallWarning, type SpeechModelV3, type SpeechModelV3CallOptions, TooManyEmbeddingValuesForCallError, type TranscriptionModelV2, type TranscriptionModelV2CallOptions, type TranscriptionModelV2CallWarning, type TranscriptionModelV3, type TranscriptionModelV3CallOptions, TypeValidationError, UnsupportedFunctionalityError, getErrorMessage, isJSONArray, isJSONObject, isJSONValue };