ai 3.1.0-canary.1 → 3.1.0-canary.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai-model-specification/dist/index.d.mts +539 -0
- package/ai-model-specification/dist/index.d.ts +539 -0
- package/ai-model-specification/dist/index.js +581 -0
- package/ai-model-specification/dist/index.js.map +1 -0
- package/ai-model-specification/dist/index.mjs +526 -0
- package/ai-model-specification/dist/index.mjs.map +1 -0
- package/core/dist/index.d.mts +120 -75
- package/core/dist/index.d.ts +120 -75
- package/core/dist/index.js +261 -173
- package/core/dist/index.js.map +1 -1
- package/core/dist/index.mjs +261 -172
- package/core/dist/index.mjs.map +1 -1
- package/dist/index.d.mts +3 -1
- package/dist/index.d.ts +3 -1
- package/dist/index.js +39 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +39 -1
- package/dist/index.mjs.map +1 -1
- package/package.json +13 -4
- package/prompts/dist/index.d.mts +32 -19
- package/prompts/dist/index.d.ts +32 -19
- package/prompts/dist/index.js +0 -1
- package/prompts/dist/index.js.map +1 -1
- package/prompts/dist/index.mjs +0 -1
- package/prompts/dist/index.mjs.map +1 -1
- package/provider/dist/index.d.mts +154 -191
- package/provider/dist/index.d.ts +154 -191
- package/provider/dist/index.js +795 -26126
- package/provider/dist/index.js.map +1 -1
- package/provider/dist/index.mjs +763 -7729
- package/provider/dist/index.mjs.map +1 -1
- package/react/dist/index.js +16 -1
- package/react/dist/index.js.map +1 -1
- package/react/dist/index.mjs +16 -1
- package/react/dist/index.mjs.map +1 -1
- package/rsc/dist/index.d.ts +11 -0
- package/rsc/dist/rsc-server.d.mts +11 -0
- package/rsc/dist/rsc-server.mjs +21 -21
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/rsc/dist/rsc-shared.mjs +21 -1
- package/rsc/dist/rsc-shared.mjs.map +1 -1
- package/provider/dist/chunk-3DTRVHCT.mjs +0 -5046
- package/provider/dist/chunk-3DTRVHCT.mjs.map +0 -1
- package/provider/dist/chunk-4OUDS3CP.mjs +0 -30
- package/provider/dist/chunk-4OUDS3CP.mjs.map +0 -1
- package/provider/dist/chunk-5IYCPJBV.mjs +0 -56
- package/provider/dist/chunk-5IYCPJBV.mjs.map +0 -1
- package/provider/dist/chunk-VB2TCVQ4.mjs +0 -6746
- package/provider/dist/chunk-VB2TCVQ4.mjs.map +0 -1
- package/provider/dist/chunk-VYIXVZ6L.mjs +0 -317
- package/provider/dist/chunk-VYIXVZ6L.mjs.map +0 -1
- package/provider/dist/chunk-WTOUHN6A.mjs +0 -2251
- package/provider/dist/chunk-WTOUHN6A.mjs.map +0 -1
- package/provider/dist/client-22WAAXR7.mjs +0 -10
- package/provider/dist/client-22WAAXR7.mjs.map +0 -1
- package/provider/dist/fileFromPath-23RINPB2.mjs +0 -115
- package/provider/dist/fileFromPath-23RINPB2.mjs.map +0 -1
- package/provider/dist/lib-BZMMM4HX.mjs +0 -20
- package/provider/dist/lib-BZMMM4HX.mjs.map +0 -1
- package/provider/dist/openai-3YL4AWLI.mjs +0 -3451
- package/provider/dist/openai-3YL4AWLI.mjs.map +0 -1
@@ -1,5 +1,4 @@
-import OpenAI from 'openai';
-import MistralClient from '@mistralai/mistralai';
+type JsonSchema = Record<string, unknown>;
 
 type LanguageModelV1CallSettings = {
   /**
@@ -29,38 +28,28 @@ type LanguageModelV1CallSettings = {
    */
   topP?: number;
   /**
-   * Presence penalty setting.
-   *
-   *
+   * Presence penalty setting. It affects the likelihood of the model to
+   * repeat information that is already in the prompt.
+   *
+   * The presence penalty is a number between -1 (increase repetition)
+   * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
    */
   presencePenalty?: number;
   /**
-   * Frequency penalty setting.
-   *
-   *
+   * Frequency penalty setting. It affects the likelihood of the model
+   * to repeatedly use the same words or phrases.
+   *
+   * The frequency penalty is a number between -1 (increase repetition)
+   * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
    */
   frequencyPenalty?: number;
   /**
-   * The seed to use for random sampling. If set and supported
-   * calls will generate deterministic results.
+   * The seed (integer) to use for random sampling. If set and supported
+   * by the model, calls will generate deterministic results.
    */
   seed?: number;
 };
 
-/**
- * Warning from the model provider for this call. The call will proceed, but e.g.
- * some settings might not be supported, which can lead to suboptimal results.
- */
-type LanguageModelV1CallWarning = {
-  type: 'unsupported-setting';
-  setting: keyof LanguageModelV1CallSettings;
-} | {
-  type: 'other';
-  message: string;
-};
-
-type JsonSchema = Record<string, unknown>;
-
 /**
  * A tool has a name, a description, and a set of parameters.
  *
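The reworded doc comments pin down concrete ranges for both penalties. A minimal sketch of the clarified semantics at a call site — the type is abbreviated from the hunk above, and the values are illustrative, not from the package:

```ts
// Abbreviated copy of the settings type from this diff.
type LanguageModelV1CallSettings = {
  topP?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  seed?: number;
};

// Illustrative values only; both penalties range from -1 (increase
// repetition) to 1 (maximum penalty), with 0 meaning no penalty.
const settings: LanguageModelV1CallSettings = {
  topP: 0.9,
  presencePenalty: 0.5, // discourage repeating information from the prompt
  frequencyPenalty: 0, // no penalty on reusing the same words or phrases
  seed: 42, // integer; deterministic results if the model supports seeding
};
```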
@@ -174,7 +163,48 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
   prompt: LanguageModelV1Prompt;
 };
 
-
+/**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+type LanguageModelV1CallWarning = {
+  type: 'unsupported-setting';
+  setting: keyof LanguageModelV1CallSettings;
+} | {
+  type: 'other';
+  message: string;
+};
+
+type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+type LanguageModelV1FunctionToolCall = {
+  toolCallType: 'function';
+  toolCallId: string;
+  toolName: string;
+  /**
+   * Stringified JSON object with the tool call arguments. Must match the
+   * parameters schema of the tool.
+   */
+  args: string;
+};
+
+type LanguageModelV1 = {
+  /**
+   * The language model must specify which language model interface
+   * version it implements. This will allow us to evolve the language
+   * model interface and retain backwards compatibility. The different
+   * implementation versions can be handled as a discriminated union
+   * on our side.
+   */
+  readonly specificationVersion: 'v1';
+  /**
+   * Name of the provider for logging purposes.
+   */
+  readonly provider: string;
+  /**
+   * Provider-specific model ID for logging purposes.
+   */
+  readonly modelId: string;
   /**
    * Default object generation mode that should be used with this model when
    * no mode is specified. Should be the mode with the best results for this
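Of the types added here, `LanguageModelV1FunctionToolCall` is the one consumers touch directly: `args` arrives as stringified JSON, so it has to be parsed before dispatch. A minimal, hypothetical dispatcher sketch (the tool name and return shape are invented for illustration):

```ts
// Tool-call shape as added in this diff.
type LanguageModelV1FunctionToolCall = {
  toolCallType: 'function';
  toolCallId: string;
  toolName: string;
  args: string; // stringified JSON; must match the tool's parameters schema
};

// Hypothetical dispatcher: parse the JSON args, then route by tool name.
function dispatchToolCall(call: LanguageModelV1FunctionToolCall): unknown {
  const args = JSON.parse(call.args) as Record<string, unknown>;
  switch (call.toolName) {
    case 'getWeather': // invented tool name, for illustration only
      return { toolCallId: call.toolCallId, city: args.city };
    default:
      throw new Error(`Unknown tool: ${call.toolName}`);
  }
}
```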
@@ -184,152 +214,83 @@ interface LanguageModel {
    * user to explicitly specify the object generation mode.
    */
   readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+  /**
+   * Generates a language model output (non-streaming).
+   *
+   * Naming: "do" prefix to prevent accidental direct usage of the method
+   * by the user.
+   */
   doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+    /**
+     * Text that the model has generated. Can be undefined if the model
+     * has only generated tool calls.
+     */
     text?: string;
-
-
+    /**
+     * Tool calls that the model has generated. Can be undefined if the
+     * model has only generated text.
+     */
+    toolCalls?: Array<LanguageModelV1FunctionToolCall>;
+    warnings?: LanguageModelV1CallWarning[];
   }>;
+  /**
+   * Generates a language model output (streaming).
+   *
+   * Naming: "do" prefix to prevent accidental direct usage of the method
+   * by the user.
+   *
+   * @return A stream of higher-level language model output parts.
+   */
   doStream(options: LanguageModelV1CallOptions): PromiseLike<{
-    stream: ReadableStream<
-    warnings
+    stream: ReadableStream<LanguageModelV1StreamPart>;
+    warnings?: LanguageModelV1CallWarning[];
   }>;
-}
-type ErrorStreamPart = {
-  type: 'error';
-  error: unknown;
 };
-type
-
-
-
-};
-type ToolCallStreamPart = {
+type LanguageModelV1StreamPart = {
+  type: 'text-delta';
+  textDelta: string;
+} | ({
   type: 'tool-call';
-} &
-type ToolCallDeltaStreamPart = {
+} & LanguageModelV1FunctionToolCall) | {
   type: 'tool-call-delta';
   toolCallId: string;
   toolName: string;
   argsTextDelta: string;
+} | {
+  type: 'finish-metadata';
+  finishReason: LanguageModelV1FinishReason;
+  usage: {
+    promptTokens: number;
+    completionTokens: number;
+  };
+} | {
+  type: 'error';
+  error: unknown;
 };
-type TextDeltaStreamPart = {
-  type: 'text-delta';
-  textDelta: string;
-};
-type LanguageModelStreamPart = TextDeltaStreamPart | ToolCallDeltaStreamPart | ToolCallStreamPart | ErrorStreamPart;
 
-
-
+type Config$1<SETTINGS extends {
+  id: string;
+}> = {
+  provider: string;
+  baseUrl: string;
+  apiKey: () => string;
+  mapSettings: (settings: SETTINGS) => Record<string, unknown> & {
+    model: string;
+  };
+};
+declare class OpenAIChatLanguageModel<SETTINGS extends {
+  id: string;
+}> implements LanguageModelV1 {
+  readonly specificationVersion = "v1";
   readonly defaultObjectGenerationMode = "tool";
-
-  private readonly
-  constructor(settings: SETTINGS, config:
-
-
-    model: string;
-  };
-  });
-  private get basePrompt();
-  private getArgs;
-  doGenerate(options: Parameters<LanguageModel['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModel['doGenerate']>>>;
-  doStream(options: Parameters<LanguageModel['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModel['doStream']>>>;
-}
-
-type FireworksChatModelId = `accounts/${string}/models/${string}`;
-/**
- * @see https://readme.fireworks.ai/reference/createchatcompletion
- */
-interface FireworksChatSettings {
-  /**
-   * The ID of the model to use.
-   */
-  id: FireworksChatModelId;
-  /**
-   * The size to which to truncate chat prompts. Earlier user/assistant messages will be
-   * evicted to fit the prompt into this length.
-   *
-   * This should usually be set to a number << the max context size of the model, to allow
-   * enough remaining tokens for generating a response.
-   *
-   * If omitted, you may receive "prompt too long" errors in your responses as
-   * conversations grow. Note that even with this set, you may still receive "prompt too long"
-   * errors if individual messages are too long for the model context window.
-   */
-  promptTruncateLength?: number;
-  /**
-   * Top-k sampling is another sampling method where the k most probable next tokens are filtered
-   * and the probability mass is redistributed among only those k next tokens. The value of k
-   * controls the number of candidates for the next token at each step during text generation.
-   */
-  topK?: number;
-  /**
-   * What to do if the token count of prompt plus max_tokens exceeds the model's context window.
-   *
-   * Passing truncate limits the max_tokens to at most context_window_length - prompt_length.
-   * This is the default.
-   *
-   * Passing error would trigger a request error.
-   *
-   * The default of 'truncate' is selected as it allows to ask for high max_tokens value while
-   * respecting the context window length without having to do client-side prompt tokenization.
-   *
-   * Note, that it differs from OpenAI's behavior that matches that of error.
-   */
-  contextLengthExceededBehavior?: 'truncate' | 'error';
-}
-
-declare function chat$3(settings: Omit<FireworksChatSettings, 'client'> & {
-  client?: OpenAI;
-  apiKey?: string;
-}): OpenAIChatLanguageModel<{
-  id: `accounts/${string}/models/${string}`;
-  promptTruncateLength?: number | undefined;
-  topK?: number | undefined;
-  contextLengthExceededBehavior?: "error" | "truncate" | undefined;
-}>;
-
-declare namespace fireworksFacade {
-  export {
-    chat$3 as chat,
-  };
-}
-
-type MistralChatModelId = 'open-mistral-7b' | 'open-mixtral-8x7b' | 'mistral-small-latest' | 'mistral-medium-latest' | 'mistral-large-latest' | (string & {});
-interface MistralChatSettings {
-  /**
-   * The ID of the model to use.
-   */
-  id: MistralChatModelId;
-  /**
-   * Whether to inject a safety prompt before all conversations.
-   *
-   * Default: false
-   */
-  safePrompt?: boolean;
-}
-
-declare class MistralChatLanguageModel implements LanguageModel {
-  readonly settings: MistralChatSettings;
-  readonly defaultObjectGenerationMode = "json";
-  private readonly getClient;
-  constructor(settings: MistralChatSettings, config: {
-    client: () => Promise<MistralClient>;
-  });
-  private get basePrompt();
+  readonly settings: SETTINGS;
+  private readonly config;
+  constructor(settings: SETTINGS, config: Config$1<SETTINGS>);
+  get provider(): string;
+  get modelId(): string;
   private getArgs;
-  doGenerate(options: Parameters<
-  doStream(options: Parameters<
-}
-
-declare function chat$2(settings: Omit<MistralChatSettings, 'client'> & {
-  client?: MistralClient;
-  apiKey?: string;
-}): MistralChatLanguageModel;
-
-declare namespace mistralFacade {
-  export {
-    chat$2 as chat,
-  };
+  doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+  doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
 }
 
 type OpenAIChatModelId = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-4-vision-preview' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | (string & {});
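Collapsing `TextDeltaStreamPart`, `ToolCallStreamPart`, `ToolCallDeltaStreamPart`, and `ErrorStreamPart` into one `LanguageModelV1StreamPart` union (plus the new `finish-metadata` part) lets a consumer drive everything from a single switch on `part.type`. A minimal sketch, assuming the union as added above (copied here with the `tool-call` intersection flattened); the consumer function itself is hypothetical:

```ts
// Stream-part union as added in this diff, flattened for readability.
type LanguageModelV1FinishReason =
  'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';

type LanguageModelV1StreamPart =
  | { type: 'text-delta'; textDelta: string }
  | { type: 'tool-call'; toolCallType: 'function'; toolCallId: string; toolName: string; args: string }
  | { type: 'tool-call-delta'; toolCallId: string; toolName: string; argsTextDelta: string }
  | { type: 'finish-metadata'; finishReason: LanguageModelV1FinishReason; usage: { promptTokens: number; completionTokens: number } }
  | { type: 'error'; error: unknown };

// Hypothetical consumer: accumulate text deltas and surface the rest.
async function drain(stream: ReadableStream<LanguageModelV1StreamPart>): Promise<string> {
  const reader = stream.getReader();
  let text = '';
  while (true) {
    const { done, value: part } = await reader.read();
    if (done) break;
    switch (part.type) {
      case 'text-delta':
        text += part.textDelta;
        break;
      case 'tool-call':
        console.log(`tool call ${part.toolName}:`, JSON.parse(part.args));
        break;
      case 'tool-call-delta':
        console.log(`partial args for ${part.toolCallId}:`, part.argsTextDelta);
        break;
      case 'finish-metadata':
        console.log(part.finishReason, part.usage);
        break;
      case 'error':
        throw part.error;
    }
  }
  return text;
}
```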
@@ -341,47 +302,49 @@ interface OpenAIChatSettings {
   logitBias?: Record<number, number>;
 }
 
-
-
-
-
-
-
-
-
-
+type Config<SETTINGS extends {
+  id: string;
+}> = {
+  provider: string;
+  baseUrl: string;
+  apiKey: () => string;
+  mapSettings: (settings: SETTINGS) => Record<string, unknown> & {
+    model: string;
+  };
+};
+declare class OpenAICompletionLanguageModel<SETTINGS extends {
+  id: string;
+}> implements LanguageModelV1 {
+  readonly specificationVersion = "v1";
+  readonly defaultObjectGenerationMode: undefined;
+  readonly settings: SETTINGS;
+  private readonly config;
+  constructor(settings: SETTINGS, config: Config<SETTINGS>);
+  get provider(): string;
+  get modelId(): string;
+  private getArgs;
+  doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+  doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
 }
 
-type
-/**
- * @see https://docs.perplexity.ai/reference/post_chat_completions
- */
-interface PerplexityChatSettings {
+type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
+interface OpenAICompletionSettings {
   /**
    * The ID of the model to use.
    */
-  id:
-  /**
-   * The number of tokens to keep for highest top-k filtering, specified as an
-   * integer between 0 and 2048 inclusive. If set to 0, top-k filtering is disabled.
-   * We recommend either altering top_k or top_p, but not both.
-   */
-  topK?: number;
+  id: OpenAICompletionModelId;
+  logitBias?: Record<number, number>;
 }
 
-declare
-
-  apiKey?: string;
-}
-
-
-}
-
-
-declare namespace perplexityFacade {
-  export {
-    perplexityFacade_chat as chat,
-  };
+declare class OpenAI {
+  readonly baseUrl?: string;
+  readonly apiKey?: string;
+  constructor({ baseUrl, apiKey }?: {
+    baseUrl?: string;
+    apiKey?: string;
+  });
+  chat(settings: OpenAIChatSettings): OpenAIChatLanguageModel<OpenAIChatSettings>;
+  completion(settings: OpenAICompletionSettings): OpenAICompletionLanguageModel<OpenAICompletionSettings>;
 }
 
-export {
+export { OpenAI };