ai 3.1.0-canary.1 → 3.1.0-canary.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai-model-specification/dist/index.d.mts +606 -0
- package/ai-model-specification/dist/index.d.ts +606 -0
- package/ai-model-specification/dist/index.js +617 -0
- package/ai-model-specification/dist/index.js.map +1 -0
- package/ai-model-specification/dist/index.mjs +560 -0
- package/ai-model-specification/dist/index.mjs.map +1 -0
- package/core/dist/index.d.mts +195 -85
- package/core/dist/index.d.ts +195 -85
- package/core/dist/index.js +497 -501
- package/core/dist/index.js.map +1 -1
- package/core/dist/index.mjs +497 -499
- package/core/dist/index.mjs.map +1 -1
- package/dist/index.d.mts +5 -2
- package/dist/index.d.ts +5 -2
- package/dist/index.js +39 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +39 -1
- package/dist/index.mjs.map +1 -1
- package/package.json +16 -7
- package/prompts/dist/index.d.mts +32 -19
- package/prompts/dist/index.d.ts +32 -19
- package/prompts/dist/index.js +0 -1
- package/prompts/dist/index.js.map +1 -1
- package/prompts/dist/index.mjs +0 -1
- package/prompts/dist/index.mjs.map +1 -1
- package/provider/dist/index.d.mts +232 -190
- package/provider/dist/index.d.ts +232 -190
- package/provider/dist/index.js +838 -26131
- package/provider/dist/index.js.map +1 -1
- package/provider/dist/index.mjs +806 -7735
- package/provider/dist/index.mjs.map +1 -1
- package/react/dist/index.d.mts +4 -4
- package/react/dist/index.d.ts +4 -4
- package/react/dist/index.js +16 -1
- package/react/dist/index.js.map +1 -1
- package/react/dist/index.mjs +16 -1
- package/react/dist/index.mjs.map +1 -1
- package/rsc/dist/index.d.ts +11 -0
- package/rsc/dist/rsc-server.d.mts +11 -0
- package/rsc/dist/rsc-server.mjs +21 -21
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/rsc/dist/rsc-shared.mjs +21 -1
- package/rsc/dist/rsc-shared.mjs.map +1 -1
- package/provider/dist/chunk-3DTRVHCT.mjs +0 -5046
- package/provider/dist/chunk-3DTRVHCT.mjs.map +0 -1
- package/provider/dist/chunk-4OUDS3CP.mjs +0 -30
- package/provider/dist/chunk-4OUDS3CP.mjs.map +0 -1
- package/provider/dist/chunk-5IYCPJBV.mjs +0 -56
- package/provider/dist/chunk-5IYCPJBV.mjs.map +0 -1
- package/provider/dist/chunk-VB2TCVQ4.mjs +0 -6746
- package/provider/dist/chunk-VB2TCVQ4.mjs.map +0 -1
- package/provider/dist/chunk-VYIXVZ6L.mjs +0 -317
- package/provider/dist/chunk-VYIXVZ6L.mjs.map +0 -1
- package/provider/dist/chunk-WTOUHN6A.mjs +0 -2251
- package/provider/dist/chunk-WTOUHN6A.mjs.map +0 -1
- package/provider/dist/client-22WAAXR7.mjs +0 -10
- package/provider/dist/client-22WAAXR7.mjs.map +0 -1
- package/provider/dist/fileFromPath-23RINPB2.mjs +0 -115
- package/provider/dist/fileFromPath-23RINPB2.mjs.map +0 -1
- package/provider/dist/lib-BZMMM4HX.mjs +0 -20
- package/provider/dist/lib-BZMMM4HX.mjs.map +0 -1
- package/provider/dist/openai-3YL4AWLI.mjs +0 -3451
- package/provider/dist/openai-3YL4AWLI.mjs.map +0 -1
package/provider/dist/index.d.ts
CHANGED
```diff
@@ -1,5 +1,4 @@
-import OpenAI from 'openai';
-import MistralClient from '@mistralai/mistralai';
+type JsonSchema = Record<string, unknown>;
 
 type LanguageModelV1CallSettings = {
     /**
@@ -29,38 +28,32 @@ type LanguageModelV1CallSettings = {
      */
     topP?: number;
     /**
-     * Presence penalty setting.
-     *
-     *
+     * Presence penalty setting. It affects the likelihood of the model to
+     * repeat information that is already in the prompt.
+     *
+     * The presence penalty is a number between -1 (increase repetition)
+     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
      */
     presencePenalty?: number;
     /**
-     * Frequency penalty setting.
-     *
-     *
+     * Frequency penalty setting. It affects the likelihood of the model
+     * to repeatedly use the same words or phrases.
+     *
+     * The frequency penalty is a number between -1 (increase repetition)
+     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
      */
     frequencyPenalty?: number;
     /**
-     * The seed to use for random sampling. If set and supported
-     * calls will generate deterministic results.
+     * The seed (integer) to use for random sampling. If set and supported
+     * by the model, calls will generate deterministic results.
      */
     seed?: number;
+    /**
+     * Abort signal for cancelling the operation.
+     */
+    abortSignal?: AbortSignal;
 };
 
-/**
- * Warning from the model provider for this call. The call will proceed, but e.g.
- * some settings might not be supported, which can lead to suboptimal results.
- */
-type LanguageModelV1CallWarning = {
-    type: 'unsupported-setting';
-    setting: keyof LanguageModelV1CallSettings;
-} | {
-    type: 'other';
-    message: string;
-};
-
-type JsonSchema = Record<string, unknown>;
-
 /**
  * A tool has a name, a description, and a set of parameters.
  *
```
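The settings hunk above adds repetition controls, a documented seed, and cancellation support. A minimal sketch of a conforming settings object (the field names come from the hunk; the values and the AbortController wiring are illustrative, not taken from the package):

```ts
// Illustrative values for the updated LanguageModelV1CallSettings fields.
const controller = new AbortController();

const settings: {
  topP?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  seed?: number;
  abortSignal?: AbortSignal;
} = {
  topP: 0.9,
  presencePenalty: 0, // 0 means no penalty
  frequencyPenalty: 0.5, // between -1 (increase repetition) and 1 (decrease)
  seed: 42, // deterministic sampling where the model supports it
  abortSignal: controller.signal,
};

// Cancelling the in-flight call, e.g. on a timeout:
setTimeout(() => controller.abort(), 10_000);

console.log(settings);
```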
```diff
@@ -114,9 +107,9 @@ interface LanguageModelV1TextPart {
 interface LanguageModelV1ImagePart {
     type: 'image';
     /**
-     * Image data as a Uint8Array.
+     * Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
      */
-    image: Uint8Array;
+    image: Uint8Array | URL;
     /**
      * Optional mime type of the image.
      */
```
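With `image` widened to `Uint8Array | URL`, both in-memory bytes and remote references should satisfy the part type. A sketch, assuming the shape shown in the hunk (the optional `mimeType` field is inferred from its doc comment; the values are made up):

```ts
// The part shape as it appears in this hunk; values are illustrative.
type LanguageModelV1ImagePart = {
  type: 'image';
  image: Uint8Array | URL;
  mimeType?: string; // optional mime type of the image
};

// In-memory bytes (e.g. read from a file or a Blob):
const fromBytes: LanguageModelV1ImagePart = {
  type: 'image',
  image: new Uint8Array([0x89, 0x50, 0x4e, 0x47]), // PNG magic bytes
  mimeType: 'image/png',
};

// Remote reference, now possible without downloading first:
const fromUrl: LanguageModelV1ImagePart = {
  type: 'image',
  image: new URL('https://example.com/photo.jpg'),
};

console.log(fromBytes.mimeType, fromUrl.image.toString());
```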
```diff
@@ -174,7 +167,48 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
     prompt: LanguageModelV1Prompt;
 };
 
-interface LanguageModel {
+/**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+type LanguageModelV1CallWarning = {
+    type: 'unsupported-setting';
+    setting: keyof LanguageModelV1CallSettings;
+} | {
+    type: 'other';
+    message: string;
+};
+
+type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+type LanguageModelV1FunctionToolCall = {
+    toolCallType: 'function';
+    toolCallId: string;
+    toolName: string;
+    /**
+     * Stringified JSON object with the tool call arguments. Must match the
+     * parameters schema of the tool.
+     */
+    args: string;
+};
+
+type LanguageModelV1 = {
+    /**
+     * The language model must specify which language model interface
+     * version it implements. This will allow us to evolve the language
+     * model interface and retain backwards compatibility. The different
+     * implementation versions can be handled as a discriminated union
+     * on our side.
+     */
+    readonly specificationVersion: 'v1';
+    /**
+     * Name of the provider for logging purposes.
+     */
+    readonly provider: string;
+    /**
+     * Provider-specific model ID for logging purposes.
+     */
+    readonly modelId: string;
     /**
      * Default object generation mode that should be used with this model when
      * no mode is specified. Should be the mode with the best results for this
```
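Note that `args` travels as stringified JSON rather than a parsed object, so it must round-trip through `JSON.stringify`/`JSON.parse` against the tool's parameters schema. A hedged sketch (ids, tool name, and argument shape are invented for illustration):

```ts
// Shape copied from the hunk above; the concrete values are made up.
type LanguageModelV1FunctionToolCall = {
  toolCallType: 'function';
  toolCallId: string;
  toolName: string;
  args: string; // stringified JSON matching the tool's parameters schema
};

const toolCall: LanguageModelV1FunctionToolCall = {
  toolCallType: 'function',
  toolCallId: 'call_0001',
  toolName: 'getWeather',
  args: JSON.stringify({ city: 'Berlin', unit: 'celsius' }),
};

// A consumer parses args back before validating them against the tool schema:
const parsed = JSON.parse(toolCall.args) as { city: string; unit: string };
console.log(parsed.city);
```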
```diff
@@ -184,204 +218,212 @@ interface LanguageModel {
      * user to explicitly specify the object generation mode.
      */
     readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+    /**
+     * Generates a language model output (non-streaming).
+     *
+     * Naming: "do" prefix to prevent accidental direct usage of the method
+     * by the user.
+     */
     doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+        /**
+         * Text that the model has generated. Can be undefined if the model
+         * has only generated tool calls.
+         */
         text?: string;
-
-
+        /**
+         * Tool calls that the model has generated. Can be undefined if the
+         * model has only generated text.
+         */
+        toolCalls?: Array<LanguageModelV1FunctionToolCall>;
+        /**
+         * Finish reason.
+         */
+        finishReason: LanguageModelV1FinishReason;
+        /**
+         * Usage information.
+         */
+        usage: {
+            promptTokens: number;
+            completionTokens: number;
+        };
+        /**
+         * Raw prompt and setting information for observability provider integration.
+         */
+        rawCall: {
+            /**
+             * Raw prompt after expansion and conversion to the format that the
+             * provider uses to send the information to their API.
+             */
+            rawPrompt: unknown;
+            /**
+             * Raw settings that are used for the API call. Includes provider-specific
+             * settings.
+             */
+            rawSettings: Record<string, unknown>;
+        };
+        warnings?: LanguageModelV1CallWarning[];
     }>;
+    /**
+     * Generates a language model output (streaming).
+     *
+     * Naming: "do" prefix to prevent accidental direct usage of the method
+     * by the user.
+     *
+     * @return A stream of higher-level language model output parts.
+     */
     doStream(options: LanguageModelV1CallOptions): PromiseLike<{
-        stream: ReadableStream<
-
+        stream: ReadableStream<LanguageModelV1StreamPart>;
+        /**
+         * Raw prompt and setting information for observability provider integration.
+         */
+        rawCall: {
+            /**
+             * Raw prompt after expansion and conversion to the format that the
+             * provider uses to send the information to their API.
+             */
+            rawPrompt: unknown;
+            /**
+             * Raw settings that are used for the API call. Includes provider-specific
+             * settings.
+             */
+            rawSettings: Record<string, unknown>;
+        };
+        warnings?: LanguageModelV1CallWarning[];
     }>;
-}
-type ErrorStreamPart = {
-    type: 'error';
-    error: unknown;
-};
-type LanguageModelToolCall = {
-    toolCallId: string;
-    toolName: string;
-    args: string;
 };
-type ToolCallStreamPart = {
+type LanguageModelV1StreamPart = {
+    type: 'text-delta';
+    textDelta: string;
+} | ({
     type: 'tool-call';
-} & LanguageModelToolCall;
-type ToolCallDeltaStreamPart = {
+} & LanguageModelV1FunctionToolCall) | {
     type: 'tool-call-delta';
     toolCallId: string;
     toolName: string;
     argsTextDelta: string;
+} | {
+    type: 'finish-metadata';
+    finishReason: LanguageModelV1FinishReason;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+    };
+} | {
+    type: 'error';
+    error: unknown;
 };
-type TextDeltaStreamPart = {
-    type: 'text-delta';
-    textDelta: string;
-};
-type LanguageModelStreamPart = TextDeltaStreamPart | ToolCallDeltaStreamPart | ToolCallStreamPart | ErrorStreamPart;
-
-declare class OpenAIChatLanguageModel<SETTINGS> implements LanguageModel {
-    readonly settings: SETTINGS;
-    readonly defaultObjectGenerationMode = "tool";
-    private readonly getClient;
-    private readonly mapSettings;
-    constructor(settings: SETTINGS, config: {
-        client: () => Promise<OpenAI>;
-        mapSettings: (settings: SETTINGS) => Record<string, unknown> & {
-            model: string;
-        };
-    });
-    private get basePrompt();
-    private getArgs;
-    doGenerate(options: Parameters<LanguageModel['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModel['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModel['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModel['doStream']>>>;
-}
 
-type
-
- * @see https://readme.fireworks.ai/reference/createchatcompletion
- */
-interface FireworksChatSettings {
-    /**
-     * The ID of the model to use.
-     */
-    id: FireworksChatModelId;
+type OpenAIChatModelId = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-4-vision-preview' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | (string & {});
+interface OpenAIChatSettings {
     /**
-     *
-     * evicted to fit the prompt into this length.
+     * Modify the likelihood of specified tokens appearing in the completion.
      *
-     *
-     *
+     * Accepts a JSON object that maps tokens (specified by their token ID in
+     * the GPT tokenizer) to an associated bias value from -100 to 100. You
+     * can use this tokenizer tool to convert text to token IDs. Mathematically,
+     * the bias is added to the logits generated by the model prior to sampling.
+     * The exact effect will vary per model, but values between -1 and 1 should
+     * decrease or increase likelihood of selection; values like -100 or 100
+     * should result in a ban or exclusive selection of the relevant token.
      *
-     *
-     *
-     * errors if individual messages are too long for the model context window.
-     */
-    promptTruncateLength?: number;
-    /**
-     * Top-k sampling is another sampling method where the k most probable next tokens are filtered
-     * and the probability mass is redistributed among only those k next tokens. The value of k
-     * controls the number of candidates for the next token at each step during text generation.
+     * As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+     * token from being generated.
      */
-
+    logitBias?: Record<number, number>;
     /**
-     *
-     *
-     * Passing truncate limits the max_tokens to at most context_window_length - prompt_length.
-     * This is the default.
-     *
-     * Passing error would trigger a request error.
-     *
-     * The default of 'truncate' is selected as it allows to ask for high max_tokens value while
-     * respecting the context window length without having to do client-side prompt tokenization.
-     *
-     * Note, that it differs from OpenAI's behavior that matches that of error.
+     * A unique identifier representing your end-user, which can help OpenAI to
+     * monitor and detect abuse. Learn more.
      */
-
+    user?: string;
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+type OpenAIChatConfig = {
+    provider: string;
+    baseUrl: string;
+    headers: () => Record<string, string | undefined>;
+};
+declare class OpenAIChatLanguageModel implements LanguageModelV1 {
+    readonly specificationVersion = "v1";
+    readonly defaultObjectGenerationMode = "tool";
+    readonly modelId: OpenAIChatModelId;
+    readonly settings: OpenAIChatSettings;
+    private readonly config;
+    constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
+    get provider(): string;
+    private getArgs;
+    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
 }
 
-type
-interface
+type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
+interface OpenAICompletionSettings {
     /**
-     *
+     * Echo back the prompt in addition to the completion
      */
-
+    echo?: boolean;
     /**
-     *
+     * Modify the likelihood of specified tokens appearing in the completion.
      *
-     *
-
-
-
-
-
-
-
-
-
-        client: () => Promise<MistralClient>;
-    });
-    private get basePrompt();
-    private getArgs;
-    doGenerate(options: Parameters<LanguageModel['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModel['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModel['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModel['doStream']>>>;
-}
-
-declare function chat$2(settings: Omit<MistralChatSettings, 'client'> & {
-    client?: MistralClient;
-    apiKey?: string;
-}): MistralChatLanguageModel;
-
-declare namespace mistralFacade {
-    export {
-        chat$2 as chat,
-    };
-}
-
-type OpenAIChatModelId = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-4-vision-preview' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | (string & {});
-interface OpenAIChatSettings {
-    /**
-     * The ID of the model to use.
+     * Accepts a JSON object that maps tokens (specified by their token ID in
+     * the GPT tokenizer) to an associated bias value from -100 to 100. You
+     * can use this tokenizer tool to convert text to token IDs. Mathematically,
+     * the bias is added to the logits generated by the model prior to sampling.
+     * The exact effect will vary per model, but values between -1 and 1 should
+     * decrease or increase likelihood of selection; values like -100 or 100
+     * should result in a ban or exclusive selection of the relevant token.
+     *
+     * As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+     * token from being generated.
      */
-    id: OpenAIChatModelId;
     logitBias?: Record<number, number>;
-}
-
-declare function chat$1(settings: OpenAIChatSettings & {
-    client?: OpenAI;
-    apiKey?: string;
-}): OpenAIChatLanguageModel<OpenAIChatSettings>;
-
-declare namespace openaiFacade {
-    export {
-        chat$1 as chat,
-    };
-}
-
-type PerplexityChatModelId = 'sonar-small-chat' | 'sonar-small-online' | 'sonar-medium-chat' | 'sonar-medium-online' | 'mistral-7b-instruct' | 'mixtral-8x7b-instruct' | (string & {});
-/**
- * @see https://docs.perplexity.ai/reference/post_chat_completions
- */
-interface PerplexityChatSettings {
     /**
-     * The
+     * The suffix that comes after a completion of inserted text.
      */
-
+    suffix?: string;
     /**
-     *
-     *
-     * We recommend either altering top_k or top_p, but not both.
+     * A unique identifier representing your end-user, which can help OpenAI to
+     * monitor and detect abuse. Learn more.
      */
-
+    user?: string;
 }
 
-
-
-
-
-
-
-
+type OpenAICompletionConfig = {
+    provider: string;
+    baseUrl: string;
+    headers: () => Record<string, string | undefined>;
+};
+declare class OpenAICompletionLanguageModel implements LanguageModelV1 {
+    readonly specificationVersion = "v1";
+    readonly defaultObjectGenerationMode: undefined;
+    readonly modelId: OpenAICompletionModelId;
+    readonly settings: OpenAICompletionSettings;
+    private readonly config;
+    constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
+    get provider(): string;
+    private getArgs;
+    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+}
 
-
-
-
-
-
+/**
+ * OpenAI provider.
+ */
+declare class OpenAI {
+    readonly baseUrl?: string;
+    readonly apiKey?: string;
+    readonly organization?: string;
+    constructor(options?: {
+        baseUrl?: string;
+        apiKey?: string;
+        organization?: string;
+    });
+    private get baseConfig();
+    chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
+    completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
 }
+/**
+ * Default OpenAI provider instance.
+ */
+declare const openai: OpenAI;
 
-export {
+export { OpenAI, openai };
```