ai 3.0.16 → 3.0.17
This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- package/google/dist/index.d.mts +365 -0
- package/google/dist/index.d.ts +365 -0
- package/google/dist/index.js +950 -0
- package/google/dist/index.js.map +1 -0
- package/google/dist/index.mjs +914 -0
- package/google/dist/index.mjs.map +1 -0
- package/mistral/dist/index.js +4 -4
- package/mistral/dist/index.js.map +1 -1
- package/mistral/dist/index.mjs +4 -4
- package/mistral/dist/index.mjs.map +1 -1
- package/openai/dist/index.js +1 -2
- package/openai/dist/index.js.map +1 -1
- package/openai/dist/index.mjs +1 -2
- package/openai/dist/index.mjs.map +1 -1
- package/package.json +10 -2
- package/spec/dist/index.js +1 -2
- package/spec/dist/index.js.map +1 -1
- package/spec/dist/index.mjs +1 -2
- package/spec/dist/index.mjs.map +1 -1
package/google/dist/index.d.mts
@@ -0,0 +1,365 @@
type JsonSchema = Record<string, unknown>;

type LanguageModelV1CallSettings = {
    /**
     * Maximum number of tokens to generate.
     */
    maxTokens?: number;
    /**
     * Temperature setting. This is a number between 0 (almost no randomness) and
     * 1 (very random).
     *
     * Different LLM providers have different temperature
     * scales, so they'd need to map it (without mapping, the same temperature has
     * different effects on different models). The provider can also choose to map
     * this to topP, potentially even using a custom setting on their model.
     *
     * Note: This is an example of a setting that requires a clear specification of
     * the semantics.
     */
    temperature?: number;
    /**
     * Nucleus sampling. This is a number between 0 and 1.
     *
     * E.g. 0.1 would mean that only tokens with the top 10% probability mass
     * are considered.
     *
     * It is recommended to set either `temperature` or `topP`, but not both.
     */
    topP?: number;
    /**
     * Presence penalty setting. It affects the likelihood of the model to
     * repeat information that is already in the prompt.
     *
     * The presence penalty is a number between -1 (increase repetition)
     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
     */
    presencePenalty?: number;
    /**
     * Frequency penalty setting. It affects the likelihood of the model
     * to repeatedly use the same words or phrases.
     *
     * The frequency penalty is a number between -1 (increase repetition)
     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
     */
    frequencyPenalty?: number;
    /**
     * The seed (integer) to use for random sampling. If set and supported
     * by the model, calls will generate deterministic results.
     */
    seed?: number;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
};
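For orientation, a settings object of this shape might look like the sketch below. The values are illustrative, not taken from the package; note the advice above to set either temperature or topP, but not both.

    // Hypothetical illustration of LanguageModelV1CallSettings (not from the package).
    const settings: LanguageModelV1CallSettings = {
        maxTokens: 512,
        temperature: 0.2, // low randomness; topP left unset per the recommendation above
        presencePenalty: 0,
        seed: 42,
        abortSignal: new AbortController().signal,
    };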

/**
 * A tool has a name, a description, and a set of parameters.
 *
 * Note: this is **not** the user-facing tool definition. The AI SDK methods will
 * map the user-facing tool definitions to this format.
 */
type LanguageModelV1FunctionTool = {
    /**
     * The type of the tool. Only functions for now, but this gives us room to
     * add more specific tool types in the future and use a discriminated union.
     */
    type: 'function';
    /**
     * The name of the tool. Unique within this model call.
     */
    name: string;
    description?: string;
    parameters: JsonSchema;
};

/**
 * A prompt is a list of messages.
 *
 * Note: Not all models and prompt formats support multi-modal inputs and
 * tool calls. The validation happens at runtime.
 *
 * Note: This is not a user-facing prompt. The AI SDK methods will map the
 * user-facing prompt types such as chat or instruction prompts to this format.
 */
type LanguageModelV1Prompt = Array<LanguageModelV1Message>;
type LanguageModelV1Message = {
    role: 'system';
    content: string;
} | {
    role: 'user';
    content: Array<LanguageModelV1TextPart | LanguageModelV1ImagePart>;
} | {
    role: 'assistant';
    content: Array<LanguageModelV1TextPart | LanguageModelV1ToolCallPart>;
} | {
    role: 'tool';
    content: Array<LanguageModelV1ToolResultPart>;
};
interface LanguageModelV1TextPart {
    type: 'text';
    /**
     * The text content.
     */
    text: string;
}
interface LanguageModelV1ImagePart {
    type: 'image';
    /**
     * Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
     */
    image: Uint8Array | URL;
    /**
     * Optional mime type of the image.
     */
    mimeType?: string;
}
interface LanguageModelV1ToolCallPart {
    type: 'tool-call';
    toolCallId: string;
    toolName: string;
    args: unknown;
}
interface LanguageModelV1ToolResultPart {
    type: 'tool-result';
    toolCallId: string;
    toolName: string;
    result: unknown;
}
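To illustrate how the message and part types compose, here is a hedged sketch (not from the package) of a prompt with a system message, a multi-modal user message, and a tool call round-trip; the tool name, IDs, and URL are made up:

    // Hypothetical LanguageModelV1Prompt exercising the message/part unions.
    const prompt: LanguageModelV1Prompt = [
        { role: 'system', content: 'You are a helpful assistant.' },
        {
            role: 'user',
            content: [
                { type: 'text', text: 'What city is shown in this image?' },
                { type: 'image', image: new URL('https://example.com/photo.jpg'), mimeType: 'image/jpeg' },
            ],
        },
        {
            role: 'assistant',
            content: [{ type: 'tool-call', toolCallId: 'call-1', toolName: 'lookupCity', args: { query: 'landmark' } }],
        },
        {
            role: 'tool',
            content: [{ type: 'tool-result', toolCallId: 'call-1', toolName: 'lookupCity', result: { city: 'Paris' } }],
        },
    ];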

type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
    /**
     * Whether the user provided the input as messages or as
     * a prompt. This can help guide non-chat models in the
     * expansion, because different expansions can be needed for
     * chat/non-chat use cases.
     */
    inputFormat: 'messages' | 'prompt';
    /**
     * The mode affects the behavior of the language model. It is required to
     * support provider-independent streaming and generation of structured objects.
     * The model can take this information and e.g. configure json mode, the correct
     * low level grammar, etc. It can also be used to optimize the efficiency of the
     * streaming, e.g. tool-delta stream parts are only needed in the
     * object-tool mode.
     */
    mode: {
        type: 'regular';
        tools?: Array<LanguageModelV1FunctionTool>;
    } | {
        type: 'object-json';
    } | {
        type: 'object-grammar';
        schema: JsonSchema;
    } | {
        type: 'object-tool';
        tool: LanguageModelV1FunctionTool;
    };
    /**
     * A language model prompt is a standardized prompt type.
     *
     * Note: This is **not** the user-facing prompt. The AI SDK methods will map the
     * user-facing prompt types such as chat or instruction prompts to this format.
     * That approach allows us to evolve the user-facing prompts without breaking
     * the language model interface.
     */
    prompt: LanguageModelV1Prompt;
};
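Putting settings, mode, and prompt together, a complete options object for a regular tool-calling request could be sketched as follows; the getWeather tool and its schema are hypothetical:

    // Hypothetical LanguageModelV1CallOptions for a regular call with one tool.
    const options: LanguageModelV1CallOptions = {
        inputFormat: 'messages',
        temperature: 0.3,
        mode: {
            type: 'regular',
            tools: [{
                type: 'function',
                name: 'getWeather', // made-up tool, not part of the package
                description: 'Get the current weather for a city.',
                parameters: { type: 'object', properties: { city: { type: 'string' } }, required: ['city'] },
            }],
        },
        prompt: [{ role: 'user', content: [{ type: 'text', text: 'Weather in Berlin?' }] }],
    };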

/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
type LanguageModelV1CallWarning = {
    type: 'unsupported-setting';
    setting: keyof LanguageModelV1CallSettings;
} | {
    type: 'other';
    message: string;
};

type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';

type LanguageModelV1FunctionToolCall = {
    toolCallType: 'function';
    toolCallId: string;
    toolName: string;
    /**
     * Stringified JSON object with the tool call arguments. Must match the
     * parameters schema of the tool.
     */
    args: string;
};

/**
 * Experimental: Specification for a language model that implements the language model
 * interface version 1.
 */
type LanguageModelV1 = {
    /**
     * The language model must specify which language model interface
     * version it implements. This will allow us to evolve the language
     * model interface and retain backwards compatibility. The different
     * implementation versions can be handled as a discriminated union
     * on our side.
     */
    readonly specificationVersion: 'v1';
    /**
     * Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
     * Default object generation mode that should be used with this model when
     * no mode is specified. Should be the mode with the best results for this
     * model. `undefined` can be returned if object generation is not supported.
     *
     * This is needed to generate the best objects possible w/o requiring the
     * user to explicitly specify the object generation mode.
     */
    readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
    /**
     * Generates a language model output (non-streaming).
     *
     * Naming: "do" prefix to prevent accidental direct usage of the method
     * by the user.
     */
    doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
        /**
         * Text that the model has generated. Can be undefined if the model
         * has only generated tool calls.
         */
        text?: string;
        /**
         * Tool calls that the model has generated. Can be undefined if the
         * model has only generated text.
         */
        toolCalls?: Array<LanguageModelV1FunctionToolCall>;
        /**
         * Finish reason.
         */
        finishReason: LanguageModelV1FinishReason;
        /**
         * Usage information.
         */
        usage: {
            promptTokens: number;
            completionTokens: number;
        };
        /**
         * Raw prompt and setting information for observability provider integration.
         */
        rawCall: {
            /**
             * Raw prompt after expansion and conversion to the format that the
             * provider uses to send the information to their API.
             */
            rawPrompt: unknown;
            /**
             * Raw settings that are used for the API call. Includes provider-specific
             * settings.
             */
            rawSettings: Record<string, unknown>;
        };
        warnings?: LanguageModelV1CallWarning[];
    }>;
    /**
     * Generates a language model output (streaming).
     *
     * Naming: "do" prefix to prevent accidental direct usage of the method
     * by the user.
     *
     * @return A stream of higher-level language model output parts.
     */
    doStream(options: LanguageModelV1CallOptions): PromiseLike<{
        stream: ReadableStream<LanguageModelV1StreamPart>;
        /**
         * Raw prompt and setting information for observability provider integration.
         */
        rawCall: {
            /**
             * Raw prompt after expansion and conversion to the format that the
             * provider uses to send the information to their API.
             */
            rawPrompt: unknown;
            /**
             * Raw settings that are used for the API call. Includes provider-specific
             * settings.
             */
            rawSettings: Record<string, unknown>;
        };
        warnings?: LanguageModelV1CallWarning[];
    }>;
};
type LanguageModelV1StreamPart = {
    type: 'text-delta';
    textDelta: string;
} | ({
    type: 'tool-call';
} & LanguageModelV1FunctionToolCall) | {
    type: 'tool-call-delta';
    toolCallType: 'function';
    toolCallId: string;
    toolName: string;
    argsTextDelta: string;
} | {
    type: 'finish';
    finishReason: LanguageModelV1FinishReason;
    usage: {
        promptTokens: number;
        completionTokens: number;
    };
} | {
    type: 'error';
    error: unknown;
};
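A consumer of doStream would switch on the type discriminator of each stream part. A minimal sketch, assuming a Node.js environment, that model is some LanguageModelV1 implementation, and that options is built as above:

    // Read stream parts and dispatch on the discriminated union (illustration only).
    const { stream } = await model.doStream(options);
    const reader = stream.getReader();
    for (;;) {
        const { done, value } = await reader.read();
        if (done) break;
        switch (value.type) {
            case 'text-delta':
                process.stdout.write(value.textDelta);
                break;
            case 'tool-call':
                // args is a stringified JSON object per LanguageModelV1FunctionToolCall
                console.log(value.toolName, JSON.parse(value.args));
                break;
            case 'finish':
                console.log(value.finishReason, value.usage);
                break;
            case 'error':
                throw value.error;
            // 'tool-call-delta' parts (object-tool mode) omitted for brevity
        }
    }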

type GoogleGenerativeAIModelId = 'models/gemini-1.5-pro-latest' | 'models/gemini-pro' | 'models/gemini-pro-vision' | (string & {});
interface GoogleGenerativeAISettings {
    topK?: number;
}

type GoogleGenerativeAIConfig = {
    provider: string;
    baseUrl: string;
    headers: () => Record<string, string | undefined>;
    generateId: () => string;
};
declare class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
    readonly specificationVersion = "v1";
    readonly defaultObjectGenerationMode: undefined;
    readonly modelId: GoogleGenerativeAIModelId;
    readonly settings: GoogleGenerativeAISettings;
    private readonly config;
    constructor(modelId: GoogleGenerativeAIModelId, settings: GoogleGenerativeAISettings, config: GoogleGenerativeAIConfig);
    get provider(): string;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
}

/**
 * Google provider.
 */
declare class Google {
    readonly baseUrl?: string;
    readonly apiKey?: string;
    private readonly generateId;
    constructor(options?: {
        baseUrl?: string;
        apiKey?: string;
        generateId?: () => string;
    });
    private get baseConfig();
    generativeAI(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): GoogleGenerativeAILanguageModel;
}
/**
 * Default Google provider instance.
 */
declare const google: Google;

export { Google, google };
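Based on these declarations, end-to-end usage might look like the sketch below. The 'ai/google' subpath import is an assumption inferred from the new package/google/dist files and the package.json change above, not something this diff confirms; the model ID and topK value are illustrative:

    // Assumed subpath export; verify against the package.json exports map.
    import { Google } from 'ai/google';
    // (a default instance `google` is also exported per the declarations above)

    const provider = new Google({ apiKey: process.env.GOOGLE_API_KEY });
    const model = provider.generativeAI('models/gemini-pro', { topK: 40 });

    const result = await model.doGenerate({
        inputFormat: 'messages',
        mode: { type: 'regular' },
        prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
        maxTokens: 128,
    });
    console.log(result.text, result.finishReason, result.usage);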