ai 3.1.0-canary.1 → 3.1.0-canary.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/ai-model-specification/dist/index.d.mts +606 -0
  2. package/ai-model-specification/dist/index.d.ts +606 -0
  3. package/ai-model-specification/dist/index.js +617 -0
  4. package/ai-model-specification/dist/index.js.map +1 -0
  5. package/ai-model-specification/dist/index.mjs +560 -0
  6. package/ai-model-specification/dist/index.mjs.map +1 -0
  7. package/core/dist/index.d.mts +195 -85
  8. package/core/dist/index.d.ts +195 -85
  9. package/core/dist/index.js +497 -501
  10. package/core/dist/index.js.map +1 -1
  11. package/core/dist/index.mjs +497 -499
  12. package/core/dist/index.mjs.map +1 -1
  13. package/dist/index.d.mts +5 -2
  14. package/dist/index.d.ts +5 -2
  15. package/dist/index.js +39 -1
  16. package/dist/index.js.map +1 -1
  17. package/dist/index.mjs +39 -1
  18. package/dist/index.mjs.map +1 -1
  19. package/package.json +16 -7
  20. package/prompts/dist/index.d.mts +32 -19
  21. package/prompts/dist/index.d.ts +32 -19
  22. package/prompts/dist/index.js +0 -1
  23. package/prompts/dist/index.js.map +1 -1
  24. package/prompts/dist/index.mjs +0 -1
  25. package/prompts/dist/index.mjs.map +1 -1
  26. package/provider/dist/index.d.mts +232 -190
  27. package/provider/dist/index.d.ts +232 -190
  28. package/provider/dist/index.js +838 -26131
  29. package/provider/dist/index.js.map +1 -1
  30. package/provider/dist/index.mjs +806 -7735
  31. package/provider/dist/index.mjs.map +1 -1
  32. package/react/dist/index.d.mts +4 -4
  33. package/react/dist/index.d.ts +4 -4
  34. package/react/dist/index.js +16 -1
  35. package/react/dist/index.js.map +1 -1
  36. package/react/dist/index.mjs +16 -1
  37. package/react/dist/index.mjs.map +1 -1
  38. package/rsc/dist/index.d.ts +11 -0
  39. package/rsc/dist/rsc-server.d.mts +11 -0
  40. package/rsc/dist/rsc-server.mjs +21 -21
  41. package/rsc/dist/rsc-server.mjs.map +1 -1
  42. package/rsc/dist/rsc-shared.mjs +21 -1
  43. package/rsc/dist/rsc-shared.mjs.map +1 -1
  44. package/provider/dist/chunk-3DTRVHCT.mjs +0 -5046
  45. package/provider/dist/chunk-3DTRVHCT.mjs.map +0 -1
  46. package/provider/dist/chunk-4OUDS3CP.mjs +0 -30
  47. package/provider/dist/chunk-4OUDS3CP.mjs.map +0 -1
  48. package/provider/dist/chunk-5IYCPJBV.mjs +0 -56
  49. package/provider/dist/chunk-5IYCPJBV.mjs.map +0 -1
  50. package/provider/dist/chunk-VB2TCVQ4.mjs +0 -6746
  51. package/provider/dist/chunk-VB2TCVQ4.mjs.map +0 -1
  52. package/provider/dist/chunk-VYIXVZ6L.mjs +0 -317
  53. package/provider/dist/chunk-VYIXVZ6L.mjs.map +0 -1
  54. package/provider/dist/chunk-WTOUHN6A.mjs +0 -2251
  55. package/provider/dist/chunk-WTOUHN6A.mjs.map +0 -1
  56. package/provider/dist/client-22WAAXR7.mjs +0 -10
  57. package/provider/dist/client-22WAAXR7.mjs.map +0 -1
  58. package/provider/dist/fileFromPath-23RINPB2.mjs +0 -115
  59. package/provider/dist/fileFromPath-23RINPB2.mjs.map +0 -1
  60. package/provider/dist/lib-BZMMM4HX.mjs +0 -20
  61. package/provider/dist/lib-BZMMM4HX.mjs.map +0 -1
  62. package/provider/dist/openai-3YL4AWLI.mjs +0 -3451
  63. package/provider/dist/openai-3YL4AWLI.mjs.map +0 -1
@@ -1,6 +1,8 @@
  import { z } from 'zod';
  import { PartialDeep, ValueOf } from 'type-fest';

+ type JsonSchema = Record<string, unknown>;
+
  type LanguageModelV1CallSettings = {
  /**
  * Maximum number of tokens to generate.
@@ -29,38 +31,32 @@ type LanguageModelV1CallSettings = {
  */
  topP?: number;
  /**
- * Presence penalty setting. This is a number between 0 (no penalty)
- * and 1 (maximum penalty). It affects the likelihood of the model to repeat
- * information that is already in the prompt.
+ * Presence penalty setting. It affects the likelihood of the model to
+ * repeat information that is already in the prompt.
+ *
+ * The presence penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
  presencePenalty?: number;
  /**
- * Frequency penalty setting. This is a number between 0 (no penalty)
- * and 1 (maximum penalty). It affects the likelihood of the model to repeatedly
- * use the same words or phrases.
+ * Frequency penalty setting. It affects the likelihood of the model
+ * to repeatedly use the same words or phrases.
+ *
+ * The frequency penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
  frequencyPenalty?: number;
  /**
- * The seed to use for random sampling. If set and supported by the model,
- * calls will generate deterministic results.
+ * The seed (integer) to use for random sampling. If set and supported
+ * by the model, calls will generate deterministic results.
  */
  seed?: number;
+ /**
+ * Abort signal for cancelling the operation.
+ */
+ abortSignal?: AbortSignal;
  };

- /**
- * Warning from the model provider for this call. The call will proceed, but e.g.
- * some settings might not be supported, which can lead to suboptimal results.
- */
- type LanguageModelV1CallWarning = {
- type: 'unsupported-setting';
- setting: keyof LanguageModelV1CallSettings;
- } | {
- type: 'other';
- message: string;
- };
-
- type JsonSchema = Record<string, unknown>;
-
  /**
  * A tool has a name, a description, and a set of parameters.
  *
@@ -114,9 +110,9 @@ interface LanguageModelV1TextPart {
  interface LanguageModelV1ImagePart {
  type: 'image';
  /**
- * Image data as a Uint8Array.
+ * Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
  */
- image: Uint8Array;
+ image: Uint8Array | URL;
  /**
  * Optional mime type of the image.
  */
@@ -174,7 +170,48 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
  prompt: LanguageModelV1Prompt;
  };

- interface LanguageModel {
+ /**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+ type LanguageModelV1CallWarning = {
+ type: 'unsupported-setting';
+ setting: keyof LanguageModelV1CallSettings;
+ } | {
+ type: 'other';
+ message: string;
+ };
+
+ type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+ type LanguageModelV1FunctionToolCall = {
+ toolCallType: 'function';
+ toolCallId: string;
+ toolName: string;
+ /**
+ * Stringified JSON object with the tool call arguments. Must match the
+ * parameters schema of the tool.
+ */
+ args: string;
+ };
+
+ type LanguageModelV1 = {
+ /**
+ * The language model must specify which language model interface
+ * version it implements. This will allow us to evolve the language
+ * model interface and retain backwards compatibility. The different
+ * implementation versions can be handled as a discriminated union
+ * on our side.
+ */
+ readonly specificationVersion: 'v1';
+ /**
+ * Name of the provider for logging purposes.
+ */
+ readonly provider: string;
+ /**
+ * Provider-specific model ID for logging purposes.
+ */
+ readonly modelId: string;
  /**
  * Default object generation mode that should be used with this model when
  * no mode is specified. Should be the mode with the best results for this
@@ -184,39 +221,106 @@ interface LanguageModel {
  * user to explicitly specify the object generation mode.
  */
  readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+ /**
+ * Generates a language model output (non-streaming).
+ *
+ * Naming: "do" prefix to prevent accidental direct usage of the method
+ * by the user.
+ */
  doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+ /**
+ * Text that the model has generated. Can be undefined if the model
+ * has only generated tool calls.
+ */
  text?: string;
- toolCalls?: Array<LanguageModelToolCall>;
- warnings: LanguageModelV1CallWarning[];
+ /**
+ * Tool calls that the model has generated. Can be undefined if the
+ * model has only generated text.
+ */
+ toolCalls?: Array<LanguageModelV1FunctionToolCall>;
+ /**
+ * Finish reason.
+ */
+ finishReason: LanguageModelV1FinishReason;
+ /**
+ * Usage information.
+ */
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ /**
+ * Raw prompt and setting information for observability provider integration.
+ */
+ rawCall: {
+ /**
+ * Raw prompt after expansion and conversion to the format that the
+ * provider uses to send the information to their API.
+ */
+ rawPrompt: unknown;
+ /**
+ * Raw settings that are used for the API call. Includes provider-specific
+ * settings.
+ */
+ rawSettings: Record<string, unknown>;
+ };
+ warnings?: LanguageModelV1CallWarning[];
  }>;
+ /**
+ * Generates a language model output (streaming).
+ *
+ * Naming: "do" prefix to prevent accidental direct usage of the method
+ * by the user.
+ *
+ * @return A stream of higher-level language model output parts.
+ */
  doStream(options: LanguageModelV1CallOptions): PromiseLike<{
- stream: ReadableStream<LanguageModelStreamPart>;
- warnings: LanguageModelV1CallWarning[];
+ stream: ReadableStream<LanguageModelV1StreamPart>;
+ /**
+ * Raw prompt and setting information for observability provider integration.
+ */
+ rawCall: {
+ /**
+ * Raw prompt after expansion and conversion to the format that the
+ * provider uses to send the information to their API.
+ */
+ rawPrompt: unknown;
+ /**
+ * Raw settings that are used for the API call. Includes provider-specific
+ * settings.
+ */
+ rawSettings: Record<string, unknown>;
+ };
+ warnings?: LanguageModelV1CallWarning[];
  }>;
- }
- type ErrorStreamPart = {
- type: 'error';
- error: unknown;
  };
- type LanguageModelToolCall = {
- toolCallId: string;
- toolName: string;
- args: string;
- };
- type ToolCallStreamPart = {
+ type LanguageModelV1StreamPart = {
+ type: 'text-delta';
+ textDelta: string;
+ } | ({
  type: 'tool-call';
- } & LanguageModelToolCall;
- type ToolCallDeltaStreamPart = {
+ } & LanguageModelV1FunctionToolCall) | {
  type: 'tool-call-delta';
  toolCallId: string;
  toolName: string;
  argsTextDelta: string;
+ } | {
+ type: 'finish-metadata';
+ finishReason: LanguageModelV1FinishReason;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ } | {
+ type: 'error';
+ error: unknown;
  };
- type TextDeltaStreamPart = {
- type: 'text-delta';
- textDelta: string;
+
+ type TokenUsage = {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
  };
- type LanguageModelStreamPart = TextDeltaStreamPart | ToolCallDeltaStreamPart | ToolCallStreamPart | ErrorStreamPart;

  type CallSettings = {
  /**
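The hunk above replaces the old LanguageModelStreamPart union with LanguageModelV1StreamPart, which adds finish metadata and usage to the stream. As an illustration only (these types are not in the package's public export list, so they are restated locally from the hunk above), the sketch below drains a stream returned by doStream() by switching on the discriminated type field:

type LanguageModelV1FinishReason =
  | 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';

// Local restatement of LanguageModelV1StreamPart from the hunk above.
type StreamPart =
  | { type: 'text-delta'; textDelta: string }
  | { type: 'tool-call'; toolCallType: 'function'; toolCallId: string; toolName: string; args: string }
  | { type: 'tool-call-delta'; toolCallId: string; toolName: string; argsTextDelta: string }
  | { type: 'finish-metadata'; finishReason: LanguageModelV1FinishReason; usage: { promptTokens: number; completionTokens: number } }
  | { type: 'error'; error: unknown };

// Drain a stream returned by doStream() and assemble the text and tool-call arguments.
async function collectStream(stream: ReadableStream<StreamPart>) {
  let text = '';
  const toolArgs = new Map<string, string>(); // toolCallId -> accumulated JSON argument text
  const reader = stream.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    switch (value.type) {
      case 'text-delta':
        text += value.textDelta;
        break;
      case 'tool-call-delta':
        toolArgs.set(value.toolCallId, (toolArgs.get(value.toolCallId) ?? '') + value.argsTextDelta);
        break;
      case 'tool-call':
        toolArgs.set(value.toolCallId, value.args); // a complete call supersedes its deltas
        break;
      case 'finish-metadata':
        console.log('finished:', value.finishReason, value.usage);
        break;
      case 'error':
        throw value.error;
    }
  }
  return { text, toolArgs };
}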
@@ -240,22 +344,34 @@ type CallSettings = {
  */
  topP?: number;
  /**
- * Presence penalty setting. This is a number between 0 (no penalty)
- * and 1 (maximum penalty). It affects the likelihood of the model to repeat
- * information that is already in the prompt.
+ * Presence penalty setting. It affects the likelihood of the model to
+ * repeat information that is already in the prompt.
+ *
+ * The presence penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
  presencePenalty?: number;
  /**
- * Frequency penalty setting. This is a number between 0 (no penalty)
- * and 1 (maximum penalty). It affects the likelihood of the model to
- * repeatedly use the same words or phrases.
+ * Frequency penalty setting. It affects the likelihood of the model
+ * to repeatedly use the same words or phrases.
+ *
+ * The frequency penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
  frequencyPenalty?: number;
  /**
- * The seed to use for random sampling. If set and supported by the model,
- * calls will generate deterministic results.
+ * The seed (integer) to use for random sampling. If set and supported
+ * by the model, calls will generate deterministic results.
  */
  seed?: number;
+ /**
+ * Maximum number of retries. Set to 0 to disable retries. Default is 2.
+ */
+ maxRetries?: number;
+ /**
+ * Abort signal.
+ */
+ abortSignal?: AbortSignal;
  };

  /**
@@ -275,9 +391,12 @@ interface TextPart {
  interface ImagePart {
  type: 'image';
  /**
- * Image data. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+ * Image data. Can either be:
+ *
+ * - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ * - URL: a URL that points to the image
  */
- image: DataContent;
+ image: DataContent | URL;
  /**
  * Optional mime type of the image.
  */
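ImagePart.image now also accepts a URL next to the existing raw-data forms. A minimal illustration; DataContent and ImagePart are restated locally from the declarations above (the real DataContent also covers Buffer, and the interface's other properties are not assumed here):

// Local restatement of the relevant shapes from the hunk above (simplified: Buffer omitted).
type DataContent = string | Uint8Array | ArrayBuffer;
interface ImagePart {
  type: 'image';
  image: DataContent | URL;
}

// New in this release: reference an image by URL instead of embedding its bytes.
export const fromUrl: ImagePart = {
  type: 'image',
  image: new URL('https://example.com/cat.png'),
};

// Raw data continues to work (base64 string, Uint8Array, ArrayBuffer, Buffer).
export const fromBytes: ImagePart = {
  type: 'image',
  image: new Uint8Array([0x89, 0x50, 0x4e, 0x47]),
};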
@@ -322,23 +441,27 @@ type Prompt = {
  /**
  * Generate a structured, typed object using a language model.
  */
- declare function generateObject<T>({ model, schema: zodSchema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
- model: LanguageModel;
+ declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ model: LanguageModelV1;
  schema: z.Schema<T>;
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<GenerateObjectResult<T>>;
  declare class GenerateObjectResult<T> {
  readonly object: T;
+ readonly finishReason: LanguageModelV1FinishReason;
+ readonly usage: TokenUsage;
  constructor(options: {
  object: T;
+ finishReason: LanguageModelV1FinishReason;
+ usage: TokenUsage;
  });
  }

  /**
  * Stream an object as a partial object stream.
  */
- declare function streamObject<T>({ model, schema: zodSchema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
- model: LanguageModel;
+ declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ model: LanguageModelV1;
  schema: z.Schema<T>;
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<StreamObjectResult<T>>;
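generateObject and streamObject now take the model as a LanguageModelV1, accept maxRetries and abortSignal, and the result exposes finishReason and usage. A hedged usage sketch: the 'ai/core' import specifier and the provider-supplied model are assumptions, and prompt is taken to be the usual string field of Prompt.

import { z } from 'zod';
// NOTE: the import specifier is an assumption for this sketch; these functions live in
// the package's core entry point (package/core/dist in this diff). Adjust to your setup.
import { generateObject } from 'ai/core';

// `model` is any LanguageModelV1 implementation supplied by a provider package.
export async function describeRecipe(
  model: Parameters<typeof generateObject>[0]['model'],
) {
  const result = await generateObject({
    model,
    schema: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
    prompt: 'Invent a simple pasta recipe.',
    maxRetries: 2,                            // new call setting: 0 disables retries, default is 2
    abortSignal: AbortSignal.timeout(30_000), // new call setting: cancel after 30s
  });

  console.log(result.finishReason); // new on GenerateObjectResult
  console.log(result.usage);        // new: { promptTokens, completionTokens, totalTokens }
  return result.object;             // typed as { name: string; ingredients: string[] }
}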
@@ -348,6 +471,10 @@ declare class StreamObjectResult<T> {
  }>>;
  constructor(modelStream: ReadableStream<string | ErrorStreamPart>);
  }
+ type ErrorStreamPart = {
+ type: 'error';
+ error: unknown;
+ };

  /**
  * A tool contains the description and the schema of the input that the tool expects.
@@ -416,30 +543,30 @@ type ToToolResultArray<TOOLS extends Record<string, Tool>> = Array<ToToolResult<
  /**
  * Generate a text and call tools using a language model.
  */
- declare function generateText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, ...settings }: CallSettings & Prompt & {
- model: LanguageModel;
+ declare function generateText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ model: LanguageModelV1;
  tools?: TOOLS;
  }): Promise<GenerateTextResult<TOOLS>>;
  declare class GenerateTextResult<TOOLS extends Record<string, Tool>> {
  readonly text: string;
  readonly toolCalls: ToToolCallArray<TOOLS>;
  readonly toolResults: ToToolResultArray<TOOLS>;
+ readonly finishReason: LanguageModelV1FinishReason;
+ readonly usage: TokenUsage;
  constructor(options: {
  text: string;
  toolCalls: ToToolCallArray<TOOLS>;
  toolResults: ToToolResultArray<TOOLS>;
+ finishReason: LanguageModelV1FinishReason;
+ usage: TokenUsage;
  });
  }

- declare class StreamTextHttpResponse extends Response {
- constructor(messageStream: ReadableStream<TextStreamPart<any>>);
- }
-
  /**
  * Stream text generated by a language model.
  */
- declare function streamText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, ...settings }: CallSettings & Prompt & {
- model: LanguageModel;
+ declare function streamText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ model: LanguageModelV1;
  tools?: TOOLS;
  }): Promise<StreamTextResult<TOOLS>>;
  type TextStreamPart<TOOLS extends Record<string, Tool>> = {
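generateText follows the same pattern: LanguageModelV1 model, maxRetries/abortSignal call settings, and finishReason/usage on the result. Another hedged sketch under the same assumptions as above (import specifier, provider model):

// Same assumptions as the generateObject sketch above.
import { generateText } from 'ai/core';

export async function answer(
  model: Parameters<typeof generateText>[0]['model'],
  signal: AbortSignal,
) {
  const { text, finishReason, usage } = await generateText({
    model,
    system: 'You answer in one short sentence.',
    prompt: 'What does a specification version on a language model enable?',
    maxRetries: 0,       // disable the built-in retries
    abortSignal: signal, // propagate cancellation from the caller
  });

  console.log(finishReason, usage); // both are new on GenerateTextResult
  return text;
}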
@@ -458,23 +585,6 @@ declare class StreamTextResult<TOOLS extends Record<string, Tool>> {
  readonly textStream: AsyncIterable<string>;
  readonly fullStream: AsyncIterable<TextStreamPart<TOOLS>>;
  constructor(stream: ReadableStream<TextStreamPart<TOOLS>>);
- toResponse(): StreamTextHttpResponse;
- }
-
- declare class UnsupportedFunctionalityError extends Error {
- readonly functionality: string;
- readonly provider: string;
- constructor({ provider, functionality, }: {
- provider: string;
- functionality: string;
- });
- toJSON(): {
- name: string;
- message: string;
- stack: string | undefined;
- provider: string;
- functionality: string;
- };
  }

- export { AssistantContent, AssistantMessage, DataContent, ErrorStreamPart, GenerateObjectResult, GenerateTextResult, ImagePart, LanguageModel, LanguageModelStreamPart, LanguageModelToolCall, LanguageModelV1ImagePart, LanguageModelV1Message, LanguageModelV1Prompt, LanguageModelV1TextPart, LanguageModelV1ToolCallPart, LanguageModelV1ToolResultPart, Message, StreamObjectResult, StreamTextHttpResponse, StreamTextResult, TextPart, TextStreamPart, Tool, ToolCallPart, ToolContent, ToolMessage, ToolResultPart, UnsupportedFunctionalityError, UserContent, UserMessage, convertDataContentToBase64String, convertDataContentToUint8Array, generateObject, generateText, streamObject, streamText, tool };
+ export { AssistantContent, AssistantMessage, DataContent, ErrorStreamPart, GenerateObjectResult, GenerateTextResult, ImagePart, Message, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, Tool, ToolCallPart, ToolContent, ToolMessage, ToolResultPart, UserContent, UserMessage, convertDataContentToBase64String, convertDataContentToUint8Array, generateObject, generateText, streamObject, streamText, tool };
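Finally, StreamTextHttpResponse and StreamTextResult.toResponse() are removed in this build, and the LanguageModelV1* types drop out of the public export list. A hedged sketch of streaming with the updated streamText: textStream is confirmed on StreamTextResult above, while assembling a plain Response by hand is shown only as one possible replacement for the removed helper, not as the library's prescribed pattern (same import and model assumptions as the earlier sketches).

import { streamText } from 'ai/core';

export async function handler(
  model: Parameters<typeof streamText>[0]['model'],
): Promise<Response> {
  const result = await streamText({
    model,
    prompt: 'Write a haiku about version bumps.',
    abortSignal: AbortSignal.timeout(15_000),
  });

  // result.textStream is an AsyncIterable<string> of text deltas (see the hunk above).
  const encoder = new TextEncoder();
  const body = new ReadableStream<Uint8Array>({
    async start(controller) {
      for await (const delta of result.textStream) {
        controller.enqueue(encoder.encode(delta));
      }
      controller.close();
    },
  });

  return new Response(body, {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' },
  });
}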