ai 3.1.0-canary.0 → 3.1.0-canary.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/ai-model-specification/dist/index.d.mts +539 -0
  2. package/ai-model-specification/dist/index.d.ts +539 -0
  3. package/ai-model-specification/dist/index.js +581 -0
  4. package/ai-model-specification/dist/index.js.map +1 -0
  5. package/ai-model-specification/dist/index.mjs +526 -0
  6. package/ai-model-specification/dist/index.mjs.map +1 -0
  7. package/core/dist/index.d.mts +122 -77
  8. package/core/dist/index.d.ts +122 -77
  9. package/core/dist/index.js +266 -174
  10. package/core/dist/index.js.map +1 -1
  11. package/core/dist/index.mjs +266 -173
  12. package/core/dist/index.mjs.map +1 -1
  13. package/dist/index.d.mts +3 -1
  14. package/dist/index.d.ts +3 -1
  15. package/dist/index.js +39 -1
  16. package/dist/index.js.map +1 -1
  17. package/dist/index.mjs +39 -1
  18. package/dist/index.mjs.map +1 -1
  19. package/package.json +14 -4
  20. package/prompts/dist/index.d.mts +32 -19
  21. package/prompts/dist/index.d.ts +32 -19
  22. package/prompts/dist/index.js +0 -1
  23. package/prompts/dist/index.js.map +1 -1
  24. package/prompts/dist/index.mjs +0 -1
  25. package/prompts/dist/index.mjs.map +1 -1
  26. package/provider/dist/index.d.mts +154 -191
  27. package/provider/dist/index.d.ts +154 -191
  28. package/provider/dist/index.js +800 -26131
  29. package/provider/dist/index.js.map +1 -1
  30. package/provider/dist/index.mjs +770 -7736
  31. package/provider/dist/index.mjs.map +1 -1
  32. package/react/dist/index.js +16 -1
  33. package/react/dist/index.js.map +1 -1
  34. package/react/dist/index.mjs +16 -1
  35. package/react/dist/index.mjs.map +1 -1
  36. package/rsc/dist/index.d.ts +11 -0
  37. package/rsc/dist/rsc-server.d.mts +11 -0
  38. package/rsc/dist/rsc-server.mjs +21 -21
  39. package/rsc/dist/rsc-server.mjs.map +1 -1
  40. package/rsc/dist/rsc-shared.mjs +21 -1
  41. package/rsc/dist/rsc-shared.mjs.map +1 -1
  42. package/provider/dist/chunk-3DTRVHCT.mjs +0 -5046
  43. package/provider/dist/chunk-3DTRVHCT.mjs.map +0 -1
  44. package/provider/dist/chunk-4OUDS3CP.mjs +0 -30
  45. package/provider/dist/chunk-4OUDS3CP.mjs.map +0 -1
  46. package/provider/dist/chunk-5IYCPJBV.mjs +0 -56
  47. package/provider/dist/chunk-5IYCPJBV.mjs.map +0 -1
  48. package/provider/dist/chunk-VB2TCVQ4.mjs +0 -6746
  49. package/provider/dist/chunk-VB2TCVQ4.mjs.map +0 -1
  50. package/provider/dist/chunk-VYIXVZ6L.mjs +0 -317
  51. package/provider/dist/chunk-VYIXVZ6L.mjs.map +0 -1
  52. package/provider/dist/chunk-WTOUHN6A.mjs +0 -2251
  53. package/provider/dist/chunk-WTOUHN6A.mjs.map +0 -1
  54. package/provider/dist/client-22WAAXR7.mjs +0 -10
  55. package/provider/dist/client-22WAAXR7.mjs.map +0 -1
  56. package/provider/dist/fileFromPath-23RINPB2.mjs +0 -115
  57. package/provider/dist/fileFromPath-23RINPB2.mjs.map +0 -1
  58. package/provider/dist/lib-BZMMM4HX.mjs +0 -20
  59. package/provider/dist/lib-BZMMM4HX.mjs.map +0 -1
  60. package/provider/dist/openai-3YL4AWLI.mjs +0 -3451
  61. package/provider/dist/openai-3YL4AWLI.mjs.map +0 -1
package/core/dist/index.d.mts (the identical diff applies to package/core/dist/index.d.ts as well)

@@ -1,6 +1,8 @@
 import { z } from 'zod';
 import { PartialDeep, ValueOf } from 'type-fest';
 
+type JsonSchema = Record<string, unknown>;
+
 type LanguageModelV1CallSettings = {
     /**
      * Maximum number of tokens to generate.
@@ -29,38 +31,28 @@ type LanguageModelV1CallSettings = {
      */
     topP?: number;
     /**
-     * Presence penalty setting. This is a number between 0 (no penalty)
-     * and 1 (maximum penalty). It affects the likelihood of the model to repeat
-     * information that is already in the prompt.
+     * Presence penalty setting. It affects the likelihood of the model to
+     * repeat information that is already in the prompt.
+     *
+     * The presence penalty is a number between -1 (increase repetition)
+     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
      */
     presencePenalty?: number;
     /**
-     * Frequency penalty setting. This is a number between 0 (no penalty)
-     * and 1 (maximum penalty). It affects the likelihood of the model to repeatedly
-     * use the same words or phrases.
+     * Frequency penalty setting. It affects the likelihood of the model
+     * to repeatedly use the same words or phrases.
+     *
+     * The frequency penalty is a number between -1 (increase repetition)
+     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
      */
     frequencyPenalty?: number;
     /**
-     * The seed to use for random sampling. If set and supported by the model,
-     * calls will generate deterministic results.
+     * The seed (integer) to use for random sampling. If set and supported
+     * by the model, calls will generate deterministic results.
      */
     seed?: number;
 };
 
-/**
- * Warning from the model provider for this call. The call will proceed, but e.g.
- * some settings might not be supported, which can lead to suboptimal results.
- */
-type LanguageModelV1CallWarning = {
-    type: 'unsupported-setting';
-    setting: keyof LanguageModelV1CallSettings;
-} | {
-    type: 'other';
-    message: string;
-};
-
-type JsonSchema = Record<string, unknown>;
-
 /**
  * A tool has a name, a description, and a set of parameters.
  *
@@ -174,7 +166,48 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
     prompt: LanguageModelV1Prompt;
 };
 
-interface LanguageModel {
+/**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+type LanguageModelV1CallWarning = {
+    type: 'unsupported-setting';
+    setting: keyof LanguageModelV1CallSettings;
+} | {
+    type: 'other';
+    message: string;
+};
+
+type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+type LanguageModelV1FunctionToolCall = {
+    toolCallType: 'function';
+    toolCallId: string;
+    toolName: string;
+    /**
+     * Stringified JSON object with the tool call arguments. Must match the
+     * parameters schema of the tool.
+     */
+    args: string;
+};
+
+type LanguageModelV1 = {
+    /**
+     * The language model must specify which language model interface
+     * version it implements. This will allow us to evolve the language
+     * model interface and retain backwards compatibility. The different
+     * implementation versions can be handled as a discriminated union
+     * on our side.
+     */
+    readonly specificationVersion: 'v1';
+    /**
+     * Name of the provider for logging purposes.
+     */
+    readonly provider: string;
+    /**
+     * Provider-specific model ID for logging purposes.
+     */
+    readonly modelId: string;
     /**
      * Default object generation mode that should be used with this model when
      * no mode is specified. Should be the mode with the best results for this
@@ -184,39 +217,59 @@ interface LanguageModel {
      * user to explicitly specify the object generation mode.
      */
     readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+    /**
+     * Generates a language model output (non-streaming).
+     *
+     * Naming: "do" prefix to prevent accidental direct usage of the method
+     * by the user.
+     */
     doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+        /**
+         * Text that the model has generated. Can be undefined if the model
+         * has only generated tool calls.
+         */
         text?: string;
-        toolCalls?: Array<LanguageModelToolCall>;
-        warnings: LanguageModelV1CallWarning[];
+        /**
+         * Tool calls that the model has generated. Can be undefined if the
+         * model has only generated text.
+         */
+        toolCalls?: Array<LanguageModelV1FunctionToolCall>;
+        warnings?: LanguageModelV1CallWarning[];
     }>;
+    /**
+     * Generates a language model output (streaming).
+     *
+     * Naming: "do" prefix to prevent accidental direct usage of the method
+     * by the user.
+     *
+     * @return A stream of higher-level language model output parts.
+     */
     doStream(options: LanguageModelV1CallOptions): PromiseLike<{
-        stream: ReadableStream<LanguageModelStreamPart>;
-        warnings: LanguageModelV1CallWarning[];
+        stream: ReadableStream<LanguageModelV1StreamPart>;
+        warnings?: LanguageModelV1CallWarning[];
     }>;
-}
-type ErrorStreamPart = {
-    type: 'error';
-    error: unknown;
-};
-type LanguageModelToolCall = {
-    toolCallId: string;
-    toolName: string;
-    args: string;
 };
-type ToolCallStreamPart = {
+type LanguageModelV1StreamPart = {
+    type: 'text-delta';
+    textDelta: string;
+} | ({
     type: 'tool-call';
-} & LanguageModelToolCall;
-type ToolCallDeltaStreamPart = {
+} & LanguageModelV1FunctionToolCall) | {
    type: 'tool-call-delta';
    toolCallId: string;
    toolName: string;
    argsTextDelta: string;
+} | {
+    type: 'finish-metadata';
+    finishReason: LanguageModelV1FinishReason;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+    };
+} | {
+    type: 'error';
+    error: unknown;
 };
-type TextDeltaStreamPart = {
-    type: 'text-delta';
-    textDelta: string;
-};
-type LanguageModelStreamPart = TextDeltaStreamPart | ToolCallDeltaStreamPart | ToolCallStreamPart | ErrorStreamPart;
 
 type CallSettings = {
     /**
@@ -240,20 +293,24 @@ type CallSettings = {
      */
     topP?: number;
     /**
-     * Presence penalty setting. This is a number between 0 (no penalty)
-     * and 1 (maximum penalty). It affects the likelihood of the model to repeat
-     * information that is already in the prompt.
+     * Presence penalty setting. It affects the likelihood of the model to
+     * repeat information that is already in the prompt.
+     *
+     * The presence penalty is a number between -1 (increase repetition)
+     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
      */
     presencePenalty?: number;
     /**
-     * Frequency penalty setting. This is a number between 0 (no penalty)
-     * and 1 (maximum penalty). It affects the likelihood of the model to
-     * repeatedly use the same words or phrases.
+     * Frequency penalty setting. It affects the likelihood of the model
+     * to repeatedly use the same words or phrases.
+     *
+     * The frequency penalty is a number between -1 (increase repetition)
+     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
      */
     frequencyPenalty?: number;
     /**
-     * The seed to use for random sampling. If set and supported by the model,
-     * calls will generate deterministic results.
+     * The seed (integer) to use for random sampling. If set and supported
+     * by the model, calls will generate deterministic results.
      */
     seed?: number;
 };
@@ -322,10 +379,10 @@ type Prompt = {
 /**
  * Generate a structured, typed object using a language model.
  */
-declare function generateObject<T>({ model, schema: zodSchema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
-    model: LanguageModel;
+declare function generateObject<T>({ model, schema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
+    model: LanguageModelV1;
     schema: z.Schema<T>;
-    mode?: 'json' | 'tool' | 'grammar';
+    mode?: 'auto' | 'json' | 'tool' | 'grammar';
 }): Promise<GenerateObjectResult<T>>;
 declare class GenerateObjectResult<T> {
     readonly object: T;
@@ -337,10 +394,10 @@ declare class GenerateObjectResult<T> {
 /**
  * Stream an object as a partial object stream.
  */
-declare function streamObject<T>({ model, schema: zodSchema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
-    model: LanguageModel;
+declare function streamObject<T>({ model, schema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
+    model: LanguageModelV1;
     schema: z.Schema<T>;
-    mode?: 'json' | 'tool' | 'grammar';
+    mode?: 'auto' | 'json' | 'tool' | 'grammar';
 }): Promise<StreamObjectResult<T>>;
 declare class StreamObjectResult<T> {
     readonly objectStream: AsyncIterable<PartialDeep<T, {
@@ -348,6 +405,10 @@ declare class StreamObjectResult<T> {
     }>>;
     constructor(modelStream: ReadableStream<string | ErrorStreamPart>);
 }
+type ErrorStreamPart = {
+    type: 'error';
+    error: unknown;
+};
 
 /**
  * A tool contains the description and the schema of the input that the tool expects.
@@ -417,7 +478,7 @@ type ToToolResultArray<TOOLS extends Record<string, Tool>> = Array<ToToolResult<
  * Generate a text and call tools using a language model.
  */
 declare function generateText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, ...settings }: CallSettings & Prompt & {
-    model: LanguageModel;
+    model: LanguageModelV1;
     tools?: TOOLS;
 }): Promise<GenerateTextResult<TOOLS>>;
 declare class GenerateTextResult<TOOLS extends Record<string, Tool>> {
@@ -439,7 +500,7 @@ declare class StreamTextHttpResponse extends Response {
 * Stream text generated by a language model.
 */
 declare function streamText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, ...settings }: CallSettings & Prompt & {
-    model: LanguageModel;
+    model: LanguageModelV1;
     tools?: TOOLS;
 }): Promise<StreamTextResult<TOOLS>>;
 type TextStreamPart<TOOLS extends Record<string, Tool>> = {
@@ -461,20 +522,4 @@ declare class StreamTextResult<TOOLS extends Record<string, Tool>> {
     toResponse(): StreamTextHttpResponse;
 }
 
-declare class UnsupportedFunctionalityError extends Error {
-    readonly functionality: string;
-    readonly provider: string;
-    constructor({ provider, functionality, }: {
-        provider: string;
-        functionality: string;
-    });
-    toJSON(): {
-        name: string;
-        message: string;
-        stack: string | undefined;
-        provider: string;
-        functionality: string;
-    };
-}
-
-export { AssistantContent, AssistantMessage, DataContent, ErrorStreamPart, GenerateObjectResult, GenerateTextResult, ImagePart, LanguageModel, LanguageModelStreamPart, LanguageModelToolCall, LanguageModelV1ImagePart, LanguageModelV1Message, LanguageModelV1Prompt, LanguageModelV1TextPart, LanguageModelV1ToolCallPart, LanguageModelV1ToolResultPart, Message, StreamObjectResult, StreamTextHttpResponse, StreamTextResult, TextPart, TextStreamPart, Tool, ToolCallPart, ToolContent, ToolMessage, ToolResultPart, UnsupportedFunctionalityError, UserContent, UserMessage, convertDataContentToBase64String, convertDataContentToUint8Array, generateObject, generateText, streamObject, streamText, tool };
+export { AssistantContent, AssistantMessage, DataContent, ErrorStreamPart, GenerateObjectResult, GenerateTextResult, ImagePart, Message, StreamObjectResult, StreamTextHttpResponse, StreamTextResult, TextPart, TextStreamPart, Tool, ToolCallPart, ToolContent, ToolMessage, ToolResultPart, UserContent, UserMessage, convertDataContentToBase64String, convertDataContentToUint8Array, generateObject, generateText, streamObject, streamText, tool };
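The LanguageModelV1StreamPart union above folds the former TextDeltaStreamPart, ToolCallStreamPart, ToolCallDeltaStreamPart and ErrorStreamPart types into one discriminated union and adds a 'finish-metadata' part carrying the finish reason and token usage. Below is a minimal consumer sketch, not part of the package: the local FinishReason and StreamPart aliases simply mirror the declarations in the diff, since the specification types themselves are not in the module's export list.

// Sketch only (assumed helper, not an export of the ai package). The part
// shapes below mirror the LanguageModelV1StreamPart union declared above.
type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';

type StreamPart =
    | { type: 'text-delta'; textDelta: string }
    | { type: 'tool-call'; toolCallType: 'function'; toolCallId: string; toolName: string; args: string }
    | { type: 'tool-call-delta'; toolCallId: string; toolName: string; argsTextDelta: string }
    | { type: 'finish-metadata'; finishReason: FinishReason; usage: { promptTokens: number; completionTokens: number } }
    | { type: 'error'; error: unknown };

// Drain a stream such as the one returned by doStream() and fold it into a
// non-streaming result: full text, assembled tool calls, finish reason, usage.
async function collectStreamParts(stream: ReadableStream<StreamPart>) {
    let text = '';
    const toolCalls = new Map<string, { toolName: string; args: string }>();
    let finishReason: FinishReason | undefined;
    let usage: { promptTokens: number; completionTokens: number } | undefined;

    const reader = stream.getReader();
    while (true) {
        const result = await reader.read();
        if (result.done) break;
        const part = result.value;

        switch (part.type) {
            case 'text-delta':
                text += part.textDelta; // text arrives as incremental deltas
                break;
            case 'tool-call-delta': {
                // argument JSON arrives as text deltas, keyed by tool call id
                const entry = toolCalls.get(part.toolCallId) ?? { toolName: part.toolName, args: '' };
                entry.args += part.argsTextDelta;
                toolCalls.set(part.toolCallId, entry);
                break;
            }
            case 'tool-call':
                // a complete call supersedes any accumulated deltas for that id
                toolCalls.set(part.toolCallId, { toolName: part.toolName, args: part.args });
                break;
            case 'finish-metadata':
                finishReason = part.finishReason;
                usage = part.usage;
                break;
            case 'error':
                throw part.error;
        }
    }

    return { text, toolCalls: [...toolCalls.values()], finishReason, usage };
}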
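A hedged usage sketch of the changed core API follows: generateObject with the new 'auto' mode and the re-documented penalty range, plus streamObject's partial object stream. The 'ai/core' import path and the myModel provider object are assumptions for illustration only; the call and result shapes come from the declarations in the diff above.

// Usage sketch under assumptions: the 'ai/core' subpath is guessed from the
// package/core/dist build output, and `myModel` stands in for any provider
// model object that implements the LanguageModelV1 specification shown above.
import { z } from 'zod';
import { generateObject, streamObject } from 'ai/core';

// Extract the model parameter type (LanguageModelV1) from the function signature.
declare const myModel: Parameters<typeof generateObject>[0]['model'];

const recipeSchema = z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
});

async function main() {
    // mode 'auto' is new in this release; presumably it defers to the model's
    // defaultObjectGenerationMode instead of forcing json/tool/grammar.
    const { object } = await generateObject({
        model: myModel,
        schema: recipeSchema,
        mode: 'auto',
        prompt: 'Suggest a simple pasta recipe.',
        presencePenalty: 0.3, // documented range is now -1 to 1, 0 = no penalty
        seed: 42, // documented as an integer seed
    });
    console.log(object.name, object.ingredients);

    // streamObject exposes incrementally filled partial objects.
    const result = await streamObject({
        model: myModel,
        schema: recipeSchema,
        prompt: 'Suggest a simple salad recipe.',
    });
    for await (const partialObject of result.objectStream) {
        console.log(partialObject);
    }
}

main().catch(console.error);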