ai 3.1.0-canary.1 → 3.1.0-canary.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/ai-model-specification/dist/index.d.mts +539 -0
  2. package/ai-model-specification/dist/index.d.ts +539 -0
  3. package/ai-model-specification/dist/index.js +581 -0
  4. package/ai-model-specification/dist/index.js.map +1 -0
  5. package/ai-model-specification/dist/index.mjs +526 -0
  6. package/ai-model-specification/dist/index.mjs.map +1 -0
  7. package/core/dist/index.d.mts +120 -75
  8. package/core/dist/index.d.ts +120 -75
  9. package/core/dist/index.js +261 -173
  10. package/core/dist/index.js.map +1 -1
  11. package/core/dist/index.mjs +261 -172
  12. package/core/dist/index.mjs.map +1 -1
  13. package/dist/index.d.mts +3 -1
  14. package/dist/index.d.ts +3 -1
  15. package/dist/index.js +39 -1
  16. package/dist/index.js.map +1 -1
  17. package/dist/index.mjs +39 -1
  18. package/dist/index.mjs.map +1 -1
  19. package/package.json +13 -4
  20. package/prompts/dist/index.d.mts +32 -19
  21. package/prompts/dist/index.d.ts +32 -19
  22. package/prompts/dist/index.js +0 -1
  23. package/prompts/dist/index.js.map +1 -1
  24. package/prompts/dist/index.mjs +0 -1
  25. package/prompts/dist/index.mjs.map +1 -1
  26. package/provider/dist/index.d.mts +154 -191
  27. package/provider/dist/index.d.ts +154 -191
  28. package/provider/dist/index.js +795 -26126
  29. package/provider/dist/index.js.map +1 -1
  30. package/provider/dist/index.mjs +763 -7729
  31. package/provider/dist/index.mjs.map +1 -1
  32. package/react/dist/index.js +16 -1
  33. package/react/dist/index.js.map +1 -1
  34. package/react/dist/index.mjs +16 -1
  35. package/react/dist/index.mjs.map +1 -1
  36. package/rsc/dist/index.d.ts +11 -0
  37. package/rsc/dist/rsc-server.d.mts +11 -0
  38. package/rsc/dist/rsc-server.mjs +21 -21
  39. package/rsc/dist/rsc-server.mjs.map +1 -1
  40. package/rsc/dist/rsc-shared.mjs +21 -1
  41. package/rsc/dist/rsc-shared.mjs.map +1 -1
  42. package/provider/dist/chunk-3DTRVHCT.mjs +0 -5046
  43. package/provider/dist/chunk-3DTRVHCT.mjs.map +0 -1
  44. package/provider/dist/chunk-4OUDS3CP.mjs +0 -30
  45. package/provider/dist/chunk-4OUDS3CP.mjs.map +0 -1
  46. package/provider/dist/chunk-5IYCPJBV.mjs +0 -56
  47. package/provider/dist/chunk-5IYCPJBV.mjs.map +0 -1
  48. package/provider/dist/chunk-VB2TCVQ4.mjs +0 -6746
  49. package/provider/dist/chunk-VB2TCVQ4.mjs.map +0 -1
  50. package/provider/dist/chunk-VYIXVZ6L.mjs +0 -317
  51. package/provider/dist/chunk-VYIXVZ6L.mjs.map +0 -1
  52. package/provider/dist/chunk-WTOUHN6A.mjs +0 -2251
  53. package/provider/dist/chunk-WTOUHN6A.mjs.map +0 -1
  54. package/provider/dist/client-22WAAXR7.mjs +0 -10
  55. package/provider/dist/client-22WAAXR7.mjs.map +0 -1
  56. package/provider/dist/fileFromPath-23RINPB2.mjs +0 -115
  57. package/provider/dist/fileFromPath-23RINPB2.mjs.map +0 -1
  58. package/provider/dist/lib-BZMMM4HX.mjs +0 -20
  59. package/provider/dist/lib-BZMMM4HX.mjs.map +0 -1
  60. package/provider/dist/openai-3YL4AWLI.mjs +0 -3451
  61. package/provider/dist/openai-3YL4AWLI.mjs.map +0 -1
@@ -1,5 +1,4 @@
- import OpenAI from 'openai';
- import MistralClient from '@mistralai/mistralai';
+ type JsonSchema = Record<string, unknown>;
 
  type LanguageModelV1CallSettings = {
  /**
@@ -29,38 +28,28 @@ type LanguageModelV1CallSettings = {
  */
  topP?: number;
  /**
- * Presence penalty setting. This is a number between 0 (no penalty)
- * and 1 (maximum penalty). It affects the likelihood of the model to repeat
- * information that is already in the prompt.
+ * Presence penalty setting. It affects the likelihood of the model to
+ * repeat information that is already in the prompt.
+ *
+ * The presence penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
  presencePenalty?: number;
  /**
- * Frequency penalty setting. This is a number between 0 (no penalty)
- * and 1 (maximum penalty). It affects the likelihood of the model to repeatedly
- * use the same words or phrases.
+ * Frequency penalty setting. It affects the likelihood of the model
+ * to repeatedly use the same words or phrases.
+ *
+ * The frequency penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
  frequencyPenalty?: number;
  /**
- * The seed to use for random sampling. If set and supported by the model,
- * calls will generate deterministic results.
+ * The seed (integer) to use for random sampling. If set and supported
+ * by the model, calls will generate deterministic results.
  */
  seed?: number;
  };
 
- /**
- * Warning from the model provider for this call. The call will proceed, but e.g.
- * some settings might not be supported, which can lead to suboptimal results.
- */
- type LanguageModelV1CallWarning = {
- type: 'unsupported-setting';
- setting: keyof LanguageModelV1CallSettings;
- } | {
- type: 'other';
- message: string;
- };
-
- type JsonSchema = Record<string, unknown>;
-
  /**
  * A tool has a name, a description, and a set of parameters.
  *
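Note: the presencePenalty and frequencyPenalty ranges documented above change in this release from 0..1 to -1..1, with negative values now increasing repetition. A minimal sketch of a settings object under the new ranges (not code from the package; it assumes the remaining fields of LanguageModelV1CallSettings are optional):

const settings: LanguageModelV1CallSettings = {
  topP: 0.9,
  presencePenalty: 0.5,   // > 0: discourage repeating information from the prompt
  frequencyPenalty: -0.2, // < 0: newly valid, mildly increases repetition
  seed: 42,               // integer; deterministic results if the model supports it
};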
@@ -174,7 +163,48 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
  prompt: LanguageModelV1Prompt;
  };
 
- interface LanguageModel {
+ /**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+ type LanguageModelV1CallWarning = {
+ type: 'unsupported-setting';
+ setting: keyof LanguageModelV1CallSettings;
+ } | {
+ type: 'other';
+ message: string;
+ };
+
+ type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+ type LanguageModelV1FunctionToolCall = {
+ toolCallType: 'function';
+ toolCallId: string;
+ toolName: string;
+ /**
+ * Stringified JSON object with the tool call arguments. Must match the
+ * parameters schema of the tool.
+ */
+ args: string;
+ };
+
+ type LanguageModelV1 = {
+ /**
+ * The language model must specify which language model interface
+ * version it implements. This will allow us to evolve the language
+ * model interface and retain backwards compatibility. The different
+ * implementation versions can be handled as a discriminated union
+ * on our side.
+ */
+ readonly specificationVersion: 'v1';
+ /**
+ * Name of the provider for logging purposes.
+ */
+ readonly provider: string;
+ /**
+ * Provider-specific model ID for logging purposes.
+ */
+ readonly modelId: string;
  /**
  * Default object generation mode that should be used with this model when
  * no mode is specified. Should be the mode with the best results for this
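The hunk above also introduces LanguageModelV1FunctionToolCall, which carries its arguments as stringified JSON rather than a parsed object. A hypothetical value of this shape (tool name and arguments invented for illustration):

const toolCall: LanguageModelV1FunctionToolCall = {
  toolCallType: 'function',
  toolCallId: 'call-1',
  toolName: 'getWeather',                   // hypothetical tool
  args: JSON.stringify({ city: 'Berlin' }), // must match the tool's parameters schema
};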
@@ -184,152 +214,83 @@ interface LanguageModel {
  * user to explicitly specify the object generation mode.
  */
  readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+ /**
+ * Generates a language model output (non-streaming).
+ *
+ * Naming: "do" prefix to prevent accidental direct usage of the method
+ * by the user.
+ */
  doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+ /**
+ * Text that the model has generated. Can be undefined if the model
+ * has only generated tool calls.
+ */
  text?: string;
- toolCalls?: Array<LanguageModelToolCall>;
- warnings: LanguageModelV1CallWarning[];
+ /**
+ * Tool calls that the model has generated. Can be undefined if the
+ * model has only generated text.
+ */
+ toolCalls?: Array<LanguageModelV1FunctionToolCall>;
+ warnings?: LanguageModelV1CallWarning[];
  }>;
+ /**
+ * Generates a language model output (streaming).
+ *
+ * Naming: "do" prefix to prevent accidental direct usage of the method
+ * by the user.
+ *
+ * @return A stream of higher-level language model output parts.
+ */
  doStream(options: LanguageModelV1CallOptions): PromiseLike<{
- stream: ReadableStream<LanguageModelStreamPart>;
- warnings: LanguageModelV1CallWarning[];
+ stream: ReadableStream<LanguageModelV1StreamPart>;
+ warnings?: LanguageModelV1CallWarning[];
  }>;
- }
- type ErrorStreamPart = {
- type: 'error';
- error: unknown;
  };
- type LanguageModelToolCall = {
- toolCallId: string;
- toolName: string;
- args: string;
- };
- type ToolCallStreamPart = {
+ type LanguageModelV1StreamPart = {
+ type: 'text-delta';
+ textDelta: string;
+ } | ({
  type: 'tool-call';
- } & LanguageModelToolCall;
- type ToolCallDeltaStreamPart = {
+ } & LanguageModelV1FunctionToolCall) | {
  type: 'tool-call-delta';
  toolCallId: string;
  toolName: string;
  argsTextDelta: string;
+ } | {
+ type: 'finish-metadata';
+ finishReason: LanguageModelV1FinishReason;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ } | {
+ type: 'error';
+ error: unknown;
  };
- type TextDeltaStreamPart = {
- type: 'text-delta';
- textDelta: string;
- };
- type LanguageModelStreamPart = TextDeltaStreamPart | ToolCallDeltaStreamPart | ToolCallStreamPart | ErrorStreamPart;
 
- declare class OpenAIChatLanguageModel<SETTINGS> implements LanguageModel {
- readonly settings: SETTINGS;
+ type Config$1<SETTINGS extends {
+ id: string;
+ }> = {
+ provider: string;
+ baseUrl: string;
+ apiKey: () => string;
+ mapSettings: (settings: SETTINGS) => Record<string, unknown> & {
+ model: string;
+ };
+ };
+ declare class OpenAIChatLanguageModel<SETTINGS extends {
+ id: string;
+ }> implements LanguageModelV1 {
+ readonly specificationVersion = "v1";
  readonly defaultObjectGenerationMode = "tool";
- private readonly getClient;
- private readonly mapSettings;
- constructor(settings: SETTINGS, config: {
- client: () => Promise<OpenAI>;
- mapSettings: (settings: SETTINGS) => Record<string, unknown> & {
- model: string;
- };
- });
- private get basePrompt();
- private getArgs;
- doGenerate(options: Parameters<LanguageModel['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModel['doGenerate']>>>;
- doStream(options: Parameters<LanguageModel['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModel['doStream']>>>;
- }
-
- type FireworksChatModelId = `accounts/${string}/models/${string}`;
- /**
- * @see https://readme.fireworks.ai/reference/createchatcompletion
- */
- interface FireworksChatSettings {
- /**
- * The ID of the model to use.
- */
- id: FireworksChatModelId;
- /**
- * The size to which to truncate chat prompts. Earlier user/assistant messages will be
- * evicted to fit the prompt into this length.
- *
- * This should usually be set to a number << the max context size of the model, to allow
- * enough remaining tokens for generating a response.
- *
- * If omitted, you may receive "prompt too long" errors in your responses as
- * conversations grow. Note that even with this set, you may still receive "prompt too long"
- * errors if individual messages are too long for the model context window.
- */
- promptTruncateLength?: number;
- /**
- * Top-k sampling is another sampling method where the k most probable next tokens are filtered
- * and the probability mass is redistributed among only those k next tokens. The value of k
- * controls the number of candidates for the next token at each step during text generation.
- */
- topK?: number;
- /**
- * What to do if the token count of prompt plus max_tokens exceeds the model's context window.
- *
- * Passing truncate limits the max_tokens to at most context_window_length - prompt_length.
- * This is the default.
- *
- * Passing error would trigger a request error.
- *
- * The default of 'truncate' is selected as it allows to ask for high max_tokens value while
- * respecting the context window length without having to do client-side prompt tokenization.
- *
- * Note, that it differs from OpenAI's behavior that matches that of error.
- */
- contextLengthExceededBehavior?: 'truncate' | 'error';
- }
-
- declare function chat$3(settings: Omit<FireworksChatSettings, 'client'> & {
- client?: OpenAI;
- apiKey?: string;
- }): OpenAIChatLanguageModel<{
- id: `accounts/${string}/models/${string}`;
- promptTruncateLength?: number | undefined;
- topK?: number | undefined;
- contextLengthExceededBehavior?: "error" | "truncate" | undefined;
- }>;
-
- declare namespace fireworksFacade {
- export {
- chat$3 as chat,
- };
- }
-
- type MistralChatModelId = 'open-mistral-7b' | 'open-mixtral-8x7b' | 'mistral-small-latest' | 'mistral-medium-latest' | 'mistral-large-latest' | (string & {});
- interface MistralChatSettings {
- /**
- * The ID of the model to use.
- */
- id: MistralChatModelId;
- /**
- * Whether to inject a safety prompt before all conversations.
- *
- * Default: false
- */
- safePrompt?: boolean;
- }
-
- declare class MistralChatLanguageModel implements LanguageModel {
- readonly settings: MistralChatSettings;
- readonly defaultObjectGenerationMode = "json";
- private readonly getClient;
- constructor(settings: MistralChatSettings, config: {
- client: () => Promise<MistralClient>;
- });
- private get basePrompt();
+ readonly settings: SETTINGS;
+ private readonly config;
+ constructor(settings: SETTINGS, config: Config$1<SETTINGS>);
+ get provider(): string;
+ get modelId(): string;
  private getArgs;
- doGenerate(options: Parameters<LanguageModel['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModel['doGenerate']>>>;
- doStream(options: Parameters<LanguageModel['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModel['doStream']>>>;
- }
-
- declare function chat$2(settings: Omit<MistralChatSettings, 'client'> & {
- client?: MistralClient;
- apiKey?: string;
- }): MistralChatLanguageModel;
-
- declare namespace mistralFacade {
- export {
- chat$2 as chat,
- };
+ doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+ doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
  }
 
  type OpenAIChatModelId = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-4-vision-preview' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | (string & {});
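With this hunk, doStream emits the consolidated LanguageModelV1StreamPart union: the old TextDeltaStreamPart, ToolCallStreamPart, ToolCallDeltaStreamPart, and ErrorStreamPart types fold into one discriminated union, plus a new 'finish-metadata' part carrying the finish reason and token usage. A consumer sketch switching on the part type (not from the package; model, options, and handleToolCall are assumed to exist):

const { stream, warnings } = await model.doStream(options);
const reader = stream.getReader();
while (true) {
  const { done, value: part } = await reader.read();
  if (done) break;
  switch (part.type) {
    case 'text-delta':
      process.stdout.write(part.textDelta); // incremental text
      break;
    case 'tool-call':
      handleToolCall(part.toolName, JSON.parse(part.args)); // args is stringified JSON
      break;
    case 'tool-call-delta':
      break; // incremental argsTextDelta for an in-progress tool call
    case 'finish-metadata':
      console.log(part.finishReason, part.usage.promptTokens, part.usage.completionTokens);
      break;
    case 'error':
      throw part.error;
  }
}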
@@ -341,47 +302,49 @@ interface OpenAIChatSettings {
  logitBias?: Record<number, number>;
  }
 
- declare function chat$1(settings: OpenAIChatSettings & {
- client?: OpenAI;
- apiKey?: string;
- }): OpenAIChatLanguageModel<OpenAIChatSettings>;
-
- declare namespace openaiFacade {
- export {
- chat$1 as chat,
- };
+ type Config<SETTINGS extends {
+ id: string;
+ }> = {
+ provider: string;
+ baseUrl: string;
+ apiKey: () => string;
+ mapSettings: (settings: SETTINGS) => Record<string, unknown> & {
+ model: string;
+ };
+ };
+ declare class OpenAICompletionLanguageModel<SETTINGS extends {
+ id: string;
+ }> implements LanguageModelV1 {
+ readonly specificationVersion = "v1";
+ readonly defaultObjectGenerationMode: undefined;
+ readonly settings: SETTINGS;
+ private readonly config;
+ constructor(settings: SETTINGS, config: Config<SETTINGS>);
+ get provider(): string;
+ get modelId(): string;
+ private getArgs;
+ doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+ doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
  }
 
- type PerplexityChatModelId = 'sonar-small-chat' | 'sonar-small-online' | 'sonar-medium-chat' | 'sonar-medium-online' | 'mistral-7b-instruct' | 'mixtral-8x7b-instruct' | (string & {});
- /**
- * @see https://docs.perplexity.ai/reference/post_chat_completions
- */
- interface PerplexityChatSettings {
+ type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
+ interface OpenAICompletionSettings {
  /**
  * The ID of the model to use.
  */
- id: PerplexityChatModelId;
- /**
- * The number of tokens to keep for highest top-k filtering, specified as an
- * integer between 0 and 2048 inclusive. If set to 0, top-k filtering is disabled.
- * We recommend either altering top_k or top_p, but not both.
- */
- topK?: number;
+ id: OpenAICompletionModelId;
+ logitBias?: Record<number, number>;
  }
 
- declare function chat(settings: PerplexityChatSettings & {
- client?: OpenAI;
- apiKey?: string;
- }): OpenAIChatLanguageModel<{
- id: PerplexityChatModelId;
- topK?: number | undefined;
- }>;
-
- declare const perplexityFacade_chat: typeof chat;
- declare namespace perplexityFacade {
- export {
- perplexityFacade_chat as chat,
- };
+ declare class OpenAI {
+ readonly baseUrl?: string;
+ readonly apiKey?: string;
+ constructor({ baseUrl, apiKey }?: {
+ baseUrl?: string;
+ apiKey?: string;
+ });
+ chat(settings: OpenAIChatSettings): OpenAIChatLanguageModel<OpenAIChatSettings>;
+ completion(settings: OpenAICompletionSettings): OpenAICompletionLanguageModel<OpenAICompletionSettings>;
  }
 
- export { fireworksFacade as fireworks, mistralFacade as mistral, openaiFacade as openai, perplexityFacade as perplexity };
+ export { OpenAI };
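Net effect of this file: the fireworks, mistral, openai, and perplexity facade namespaces and their chat() factories are removed along with the direct dependency on the openai and @mistralai/mistralai client packages; the module now exports a single self-contained OpenAI provider class. A hypothetical usage sketch based on the declarations above (the import path and API-key handling are assumptions, not confirmed by this diff):

import { OpenAI } from 'ai/provider'; // assumed subpath export

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const chatModel = openai.chat({ id: 'gpt-4' });                              // OpenAIChatLanguageModel
const completionModel = openai.completion({ id: 'gpt-3.5-turbo-instruct' }); // OpenAICompletionLanguageModel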