ai 3.0.13 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/dist/index.d.mts +998 -2
  2. package/dist/index.d.ts +998 -2
  3. package/dist/index.js +1743 -15
  4. package/dist/index.js.map +1 -1
  5. package/dist/index.mjs +1720 -15
  6. package/dist/index.mjs.map +1 -1
  7. package/mistral/dist/index.d.mts +371 -0
  8. package/mistral/dist/index.d.ts +371 -0
  9. package/mistral/dist/index.js +936 -0
  10. package/mistral/dist/index.js.map +1 -0
  11. package/mistral/dist/index.mjs +900 -0
  12. package/mistral/dist/index.mjs.map +1 -0
  13. package/openai/dist/index.d.mts +434 -0
  14. package/openai/dist/index.d.ts +434 -0
  15. package/openai/dist/index.js +1355 -0
  16. package/openai/dist/index.js.map +1 -0
  17. package/openai/dist/index.mjs +1319 -0
  18. package/openai/dist/index.mjs.map +1 -0
  19. package/package.json +32 -6
  20. package/prompts/dist/index.d.mts +13 -1
  21. package/prompts/dist/index.d.ts +13 -1
  22. package/prompts/dist/index.js +13 -0
  23. package/prompts/dist/index.js.map +1 -1
  24. package/prompts/dist/index.mjs +12 -0
  25. package/prompts/dist/index.mjs.map +1 -1
  26. package/react/dist/index.js +35 -34
  27. package/react/dist/index.js.map +1 -1
  28. package/react/dist/index.mjs +35 -34
  29. package/react/dist/index.mjs.map +1 -1
  30. package/rsc/dist/index.d.ts +45 -8
  31. package/rsc/dist/rsc-server.d.mts +45 -8
  32. package/rsc/dist/rsc-server.mjs +67 -13
  33. package/rsc/dist/rsc-server.mjs.map +1 -1
  34. package/rsc/dist/rsc-shared.d.mts +5 -8
  35. package/rsc/dist/rsc-shared.mjs +23 -2
  36. package/rsc/dist/rsc-shared.mjs.map +1 -1
  37. package/solid/dist/index.js +29 -27
  38. package/solid/dist/index.js.map +1 -1
  39. package/solid/dist/index.mjs +29 -27
  40. package/solid/dist/index.mjs.map +1 -1
  41. package/spec/dist/index.d.mts +708 -0
  42. package/spec/dist/index.d.ts +708 -0
  43. package/spec/dist/index.js +806 -0
  44. package/spec/dist/index.js.map +1 -0
  45. package/spec/dist/index.mjs +742 -0
  46. package/spec/dist/index.mjs.map +1 -0
  47. package/svelte/dist/index.js +31 -29
  48. package/svelte/dist/index.js.map +1 -1
  49. package/svelte/dist/index.mjs +31 -29
  50. package/svelte/dist/index.mjs.map +1 -1
  51. package/vue/dist/index.js +29 -27
  52. package/vue/dist/index.js.map +1 -1
  53. package/vue/dist/index.mjs +29 -27
  54. package/vue/dist/index.mjs.map +1 -1
package/dist/index.d.mts CHANGED
@@ -1,8 +1,999 @@
+ import { z } from 'zod';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';
  import { ChatCompletionResponseChunk } from '@mistralai/mistralai';
  import { ServerResponse } from 'node:http';

+ type JsonSchema = Record<string, unknown>;
+
+ type LanguageModelV1CallSettings = {
+ /**
+ * Maximum number of tokens to generate.
+ */
+ maxTokens?: number;
+ /**
+ * Temperature setting. This is a number between 0 (almost no randomness) and
+ * 1 (very random).
+ *
+ * Different LLM providers have different temperature
+ * scales, so they'd need to map it (without mapping, the same temperature has
+ * different effects on different models). The provider can also choose to map
+ * this to topP, potentially even using a custom setting on their model.
+ *
+ * Note: This is an example of a setting that requires a clear specification of
+ * the semantics.
+ */
+ temperature?: number;
+ /**
+ * Nucleus sampling. This is a number between 0 and 1.
+ *
+ * E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ * are considered.
+ *
+ * It is recommended to set either `temperature` or `topP`, but not both.
+ */
+ topP?: number;
+ /**
+ * Presence penalty setting. It affects the likelihood of the model to
+ * repeat information that is already in the prompt.
+ *
+ * The presence penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ */
+ presencePenalty?: number;
+ /**
+ * Frequency penalty setting. It affects the likelihood of the model
+ * to repeatedly use the same words or phrases.
+ *
+ * The frequency penalty is a number between -1 (increase repetition)
+ * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ */
+ frequencyPenalty?: number;
+ /**
+ * The seed (integer) to use for random sampling. If set and supported
+ * by the model, calls will generate deterministic results.
+ */
+ seed?: number;
+ /**
+ * Abort signal for cancelling the operation.
+ */
+ abortSignal?: AbortSignal;
+ };
+
+ /**
+ * A tool has a name, a description, and a set of parameters.
+ *
+ * Note: this is **not** the user-facing tool definition. The AI SDK methods will
+ * map the user-facing tool definitions to this format.
+ */
+ type LanguageModelV1FunctionTool = {
+ /**
+ * The type of the tool. Only functions for now, but this gives us room to
+ * add more specific tool types in the future and use a discriminated union.
+ */
+ type: 'function';
+ /**
+ * The name of the tool. Unique within this model call.
+ */
+ name: string;
+ description?: string;
+ parameters: JsonSchema;
+ };
+
+ /**
+ * A prompt is a list of messages.
+ *
+ * Note: Not all models and prompt formats support multi-modal inputs and
+ * tool calls. The validation happens at runtime.
+ *
+ * Note: This is not a user-facing prompt. The AI SDK methods will map the
+ * user-facing prompt types such as chat or instruction prompts to this format.
+ */
+ type LanguageModelV1Prompt = Array<LanguageModelV1Message>;
+ type LanguageModelV1Message = {
+ role: 'system';
+ content: string;
+ } | {
+ role: 'user';
+ content: Array<LanguageModelV1TextPart | LanguageModelV1ImagePart>;
+ } | {
+ role: 'assistant';
+ content: Array<LanguageModelV1TextPart | LanguageModelV1ToolCallPart>;
+ } | {
+ role: 'tool';
+ content: Array<LanguageModelV1ToolResultPart>;
+ };
+ interface LanguageModelV1TextPart {
+ type: 'text';
+ /**
+ * The text content.
+ */
+ text: string;
+ }
+ interface LanguageModelV1ImagePart {
+ type: 'image';
+ /**
+ * Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
+ */
+ image: Uint8Array | URL;
+ /**
+ * Optional mime type of the image.
+ */
+ mimeType?: string;
+ }
+ interface LanguageModelV1ToolCallPart {
+ type: 'tool-call';
+ toolCallId: string;
+ toolName: string;
+ args: unknown;
+ }
+ interface LanguageModelV1ToolResultPart {
+ type: 'tool-result';
+ toolCallId: string;
+ toolName: string;
+ result: unknown;
+ }
+
+ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
+ /**
+ * Whether the user provided the input as messages or as
+ * a prompt. This can help guide non-chat models in the
+ * expansion, because different expansions can be needed for
+ * chat/non-chat use cases.
+ */
+ inputFormat: 'messages' | 'prompt';
+ /**
+ * The mode affects the behavior of the language model. It is required to
+ * support provider-independent streaming and generation of structured objects.
+ * The model can take this information and e.g. configure json mode, the correct
+ * low level grammar, etc. It can also be used to optimize the efficiency of the
+ * streaming, e.g. tool-delta stream parts are only needed in the
+ * object-tool mode.
+ */
+ mode: {
+ type: 'regular';
+ tools?: Array<LanguageModelV1FunctionTool>;
+ } | {
+ type: 'object-json';
+ } | {
+ type: 'object-grammar';
+ schema: JsonSchema;
+ } | {
+ type: 'object-tool';
+ tool: LanguageModelV1FunctionTool;
+ };
+ /**
+ * A language model prompt is a standardized prompt type.
+ *
+ * Note: This is **not** the user-facing prompt. The AI SDK methods will map the
+ * user-facing prompt types such as chat or instruction prompts to this format.
+ * That approach allows us to evolve the user-facing prompts without breaking
+ * the language model interface.
+ */
+ prompt: LanguageModelV1Prompt;
+ };
+
+ /**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+ type LanguageModelV1CallWarning = {
+ type: 'unsupported-setting';
+ setting: keyof LanguageModelV1CallSettings;
+ } | {
+ type: 'other';
+ message: string;
+ };
+
+ type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+ type LanguageModelV1FunctionToolCall = {
+ toolCallType: 'function';
+ toolCallId: string;
+ toolName: string;
+ /**
+ * Stringified JSON object with the tool call arguments. Must match the
+ * parameters schema of the tool.
+ */
+ args: string;
+ };
+
+ /**
+ * Experimental: Specification for a language model that implements the language model
+ * interface version 1.
+ */
+ type LanguageModelV1 = {
+ /**
+ * The language model must specify which language model interface
+ * version it implements. This will allow us to evolve the language
+ * model interface and retain backwards compatibility. The different
+ * implementation versions can be handled as a discriminated union
+ * on our side.
+ */
+ readonly specificationVersion: 'v1';
+ /**
+ * Name of the provider for logging purposes.
+ */
+ readonly provider: string;
+ /**
+ * Provider-specific model ID for logging purposes.
+ */
+ readonly modelId: string;
+ /**
+ * Default object generation mode that should be used with this model when
+ * no mode is specified. Should be the mode with the best results for this
+ * model. `undefined` can be returned if object generation is not supported.
+ *
+ * This is needed to generate the best objects possible w/o requiring the
+ * user to explicitly specify the object generation mode.
+ */
+ readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+ /**
+ * Generates a language model output (non-streaming).
+ *
+ * Naming: "do" prefix to prevent accidental direct usage of the method
+ * by the user.
+ */
+ doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+ /**
+ * Text that the model has generated. Can be undefined if the model
+ * has only generated tool calls.
+ */
+ text?: string;
+ /**
+ * Tool calls that the model has generated. Can be undefined if the
+ * model has only generated text.
+ */
+ toolCalls?: Array<LanguageModelV1FunctionToolCall>;
+ /**
+ * Finish reason.
+ */
+ finishReason: LanguageModelV1FinishReason;
+ /**
+ * Usage information.
+ */
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ /**
+ * Raw prompt and setting information for observability provider integration.
+ */
+ rawCall: {
+ /**
+ * Raw prompt after expansion and conversion to the format that the
+ * provider uses to send the information to their API.
+ */
+ rawPrompt: unknown;
+ /**
+ * Raw settings that are used for the API call. Includes provider-specific
+ * settings.
+ */
+ rawSettings: Record<string, unknown>;
+ };
+ warnings?: LanguageModelV1CallWarning[];
+ }>;
+ /**
+ * Generates a language model output (streaming).
+ *
+ * Naming: "do" prefix to prevent accidental direct usage of the method
+ * by the user.
+ *
+ * @return A stream of higher-level language model output parts.
+ */
+ doStream(options: LanguageModelV1CallOptions): PromiseLike<{
+ stream: ReadableStream<LanguageModelV1StreamPart>;
+ /**
+ * Raw prompt and setting information for observability provider integration.
+ */
+ rawCall: {
+ /**
+ * Raw prompt after expansion and conversion to the format that the
+ * provider uses to send the information to their API.
+ */
+ rawPrompt: unknown;
+ /**
+ * Raw settings that are used for the API call. Includes provider-specific
+ * settings.
+ */
+ rawSettings: Record<string, unknown>;
+ };
+ warnings?: LanguageModelV1CallWarning[];
+ }>;
+ };
+ type LanguageModelV1StreamPart = {
+ type: 'text-delta';
+ textDelta: string;
+ } | ({
+ type: 'tool-call';
+ } & LanguageModelV1FunctionToolCall) | {
+ type: 'tool-call-delta';
+ toolCallType: 'function';
+ toolCallId: string;
+ toolName: string;
+ argsTextDelta: string;
+ } | {
+ type: 'finish';
+ finishReason: LanguageModelV1FinishReason;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ } | {
+ type: 'error';
+ error: unknown;
+ };
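The spec above is the full provider contract, so a model is just an object of this shape. A minimal sketch of a hypothetical test double (not part of this package; the `ai/spec` import path is a guess based on the new spec entry point shipped in this release):

```ts
import type { LanguageModelV1 } from 'ai/spec'; // import path is an assumption

// Hypothetical test double: emits a fixed string and token counts.
const echoModel: LanguageModelV1 = {
  specificationVersion: 'v1',
  provider: 'example',
  modelId: 'echo-1',
  defaultObjectGenerationMode: undefined, // object generation not supported
  async doGenerate(options) {
    return {
      text: 'Hello!',
      finishReason: 'stop',
      usage: { promptTokens: 1, completionTokens: 1 },
      rawCall: { rawPrompt: options.prompt, rawSettings: {} },
    };
  },
  async doStream(options) {
    return {
      stream: new ReadableStream({
        start(controller) {
          controller.enqueue({ type: 'text-delta', textDelta: 'Hello!' });
          controller.enqueue({
            type: 'finish',
            finishReason: 'stop',
            usage: { promptTokens: 1, completionTokens: 1 },
          });
          controller.close();
        },
      }),
      rawCall: { rawPrompt: options.prompt, rawSettings: {} },
    };
  },
};
```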
+
+ type TokenUsage = {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+
+ type CallSettings = {
+ /**
+ Maximum number of tokens to generate.
+ */
+ maxTokens?: number;
+ /**
+ Temperature setting. This is a number between 0 (almost no randomness) and
+ 1 (very random).
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+
+ @default 0
+ */
+ temperature?: number;
+ /**
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+ */
+ topP?: number;
+ /**
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
+ */
+ presencePenalty?: number;
+ /**
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
+ */
+ frequencyPenalty?: number;
+ /**
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
+ */
+ seed?: number;
+ /**
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ };
+
+ /**
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+ */
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+ /**
+ Converts data content to a base64-encoded string.
+
+ @param content - Data content to convert.
+ @returns Base64-encoded string.
+ */
+ declare function convertDataContentToBase64String(content: DataContent): string;
+ /**
+ Converts data content to a Uint8Array.
+
+ @param content - Data content to convert.
+ @returns Uint8Array.
+ */
+ declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;
+
+ /**
+ Text content part of a prompt. It contains a string of text.
+ */
+ interface TextPart$1 {
+ type: 'text';
+ /**
+ The text content.
+ */
+ text: string;
+ }
+ /**
+ Image content part of a prompt. It contains an image.
+ */
+ interface ImagePart {
+ type: 'image';
+ /**
+ Image data. Can either be:
+
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ - URL: a URL that points to the image
+ */
+ image: DataContent | URL;
+ /**
+ Optional mime type of the image.
+ */
+ mimeType?: string;
+ }
+ /**
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
+ interface ToolCallPart {
+ type: 'tool-call';
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that is being called.
+ */
+ toolName: string;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: unknown;
+ }
+ /**
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
+ interface ToolResultPart {
+ type: 'tool-result';
+ /**
+ ID of the tool call that this result is associated with.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that generated this result.
+ */
+ toolName: string;
+ /**
+ Result of the tool call. This is a JSON-serializable object.
+ */
+ result: unknown;
+ }
+
+ /**
+ A message that can be used in the `messages` field of a prompt.
+ It can be a user message, an assistant message, or a tool message.
+ */
+ type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
+ /**
+ A user message. It can contain text or a combination of text and images.
+ */
+ type ExperimentalUserMessage = {
+ role: 'user';
+ content: UserContent;
+ };
+ /**
+ Content of a user message. It can be a string or an array of text and image parts.
+ */
+ type UserContent = string | Array<TextPart$1 | ImagePart>;
+ /**
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
+ type ExperimentalAssistantMessage = {
+ role: 'assistant';
+ content: AssistantContent;
+ };
+ /**
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
+ type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
+ /**
+ A tool message. It contains the result of one or more tool calls.
+ */
+ type ExperimentalToolMessage = {
+ role: 'tool';
+ content: ToolContent;
+ };
+ /**
+ Content of a tool message. It is an array of tool result parts.
+ */
+ type ToolContent = Array<ToolResultPart>;
+
+ /**
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
+ type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
+ system?: string;
+ /**
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: string;
+ /**
+ A list of messages. You can either use `prompt` or `messages` but not both.
+ */
+ messages?: Array<ExperimentalMessage>;
+ };
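For orientation, here is what a `messages` value covering these part types could look like (values are illustrative; `readFileSync` returns a `Buffer`, which is valid `DataContent`):

```ts
import { readFileSync } from 'node:fs';
import type { ExperimentalMessage } from 'ai';

// Illustrative only: a multi-modal user message, an assistant tool call,
// and the matching tool result.
const messages: ExperimentalMessage[] = [
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is in this image?' },
      { type: 'image', image: readFileSync('photo.png'), mimeType: 'image/png' },
    ],
  },
  {
    role: 'assistant',
    content: [
      { type: 'tool-call', toolCallId: 'call-1', toolName: 'describeImage', args: {} },
    ],
  },
  {
    role: 'tool',
    content: [
      { type: 'tool-result', toolCallId: 'call-1', toolName: 'describeImage', result: 'A cat.' },
    ],
  },
];
```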
+
+ /**
+ Generate a structured, typed object for a given prompt and schema using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
+
+ @param model - The language model to use.
+ @param schema - The schema of the object that the model should generate.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ */
+ declare function experimental_generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
+ model: LanguageModelV1;
+ /**
+ The schema of the object that the model should generate.
+ */
+ schema: z.Schema<T>;
+ /**
+ The mode to use for object generation. Not all models support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool' | 'grammar';
+ }): Promise<GenerateObjectResult<T>>;
+ /**
+ The result of a `generateObject` call.
+ */
+ declare class GenerateObjectResult<T> {
+ /**
+ The generated object (typed according to the schema).
+ */
+ readonly object: T;
+ /**
+ The reason why the generation finished.
+ */
+ readonly finishReason: LanguageModelV1FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: TokenUsage;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ constructor(options: {
+ object: T;
+ finishReason: LanguageModelV1FinishReason;
+ usage: TokenUsage;
+ warnings: LanguageModelV1CallWarning[] | undefined;
+ });
+ }
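A sketch of a call, assuming some `model` implementing `LanguageModelV1` (e.g. from the new `ai/openai` or `ai/mistral` bundles in this release; provider setup is omitted, and the `ai/spec` import path is an assumption):

```ts
import { experimental_generateObject } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from 'ai/spec'; // import path is an assumption
declare const model: LanguageModelV1; // any provider implementation

const result = await experimental_generateObject({
  model,
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
  prompt: 'Invent a simple pasta recipe.',
});

result.object.ingredients; // string[], typed via the zod schema
console.log(result.finishReason, result.usage.totalTokens);
```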
+
+ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
+
+ /**
+ Create a type from an object with all keys and nested keys set to optional.
+ The helper supports normal objects and Zod schemas (which are resolved automatically).
+ It always recurses into arrays.
+
+ Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
+ */
+ type DeepPartial<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends z.Schema<any> ? DeepPartial<T['_type']> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartial<ItemType | undefined>> : Array<DeepPartial<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
+ type PartialMap<KeyType, ValueType> = {} & Map<DeepPartial<KeyType>, DeepPartial<ValueType>>;
+ type PartialSet<T> = {} & Set<DeepPartial<T>>;
+ type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartial<KeyType>, DeepPartial<ValueType>>;
+ type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartial<T>>;
+ type PartialObject<ObjectType extends object> = {
+ [KeyType in keyof ObjectType]?: DeepPartial<ObjectType[KeyType]>;
+ };
+
+ /**
+ Generate a structured, typed object for a given prompt and schema using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
+
+ @param model - The language model to use.
+ @param schema - The schema of the object that the model should generate.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @return
+ A result object for accessing the partial object stream and additional information.
+ */
+ declare function experimental_streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
+ model: LanguageModelV1;
+ /**
+ The schema of the object that the model should generate.
+ */
+ schema: z.Schema<T>;
+ /**
+ The mode to use for object generation. Not all models support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool' | 'grammar';
+ }): Promise<StreamObjectResult<T>>;
+ /**
+ The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
+ declare class StreamObjectResult<T> {
+ private readonly originalStream;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ constructor({ stream, warnings, }: {
+ stream: ReadableStream<string | ErrorStreamPart>;
+ warnings: LanguageModelV1CallWarning[] | undefined;
+ });
+ get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
+ }
+ type ErrorStreamPart = {
+ type: 'error';
+ error: unknown;
+ };
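The interesting accessor is `partialObjectStream`, which yields `DeepPartial<T>` snapshots as the object streams in. A minimal consumption sketch (same `model` assumptions as above):

```ts
import { experimental_streamObject } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from 'ai/spec'; // import path is an assumption
declare const model: LanguageModelV1; // any provider implementation

const result = await experimental_streamObject({
  model,
  schema: z.object({ title: z.string(), summary: z.string() }),
  prompt: 'Summarize the plot of Hamlet.',
});

// Each iteration sees a growing snapshot, e.g. {} -> { title: 'Ham' } -> ...
for await (const partialObject of result.partialObjectStream) {
  console.log(partialObject);
}
```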
+
+ /**
+ A tool contains the description and the schema of the input that the tool expects.
+ This enables the language model to generate the input.
+
+ The tool can also contain an optional execute function with the actual implementation of the tool.
+ */
+ interface ExperimentalTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
+ /**
+ An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+ */
+ description?: string;
+ /**
+ The schema of the input that the tool expects. The language model will use this to generate the input.
+ Use descriptions to make the input understandable for the language model.
+ */
+ parameters: PARAMETERS;
+ /**
+ An optional execute function with the actual implementation of the tool.
+ If not provided, the tool will not be executed automatically.
+ */
+ execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ }
+ /**
+ Helper function for inferring the execute args of a tool.
+ */
+ declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: ExperimentalTool<PARAMETERS, RESULT> & {
+ execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ }): ExperimentalTool<PARAMETERS, RESULT> & {
+ execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ };
+ declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: ExperimentalTool<PARAMETERS, RESULT> & {
+ execute?: undefined;
+ }): ExperimentalTool<PARAMETERS, RESULT> & {
+ execute: undefined;
+ };
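The two overloads exist purely for type inference: when `execute` is present, its `args` are inferred from the zod `parameters`, and the result type flows through to `toolResults` below. A small example (hypothetical tool, not from this package):

```ts
import { tool } from 'ai';
import { z } from 'zod';

const weatherTool = tool({
  description: 'Get the current temperature for a city.',
  parameters: z.object({ city: z.string() }),
  // `city` is inferred as string from the schema above.
  execute: async ({ city }) => ({ city, temperatureCelsius: 21 }),
});
```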
+
+ /**
+ Create a union of the given object's values, and optionally specify which keys to get the values from.
+
+ Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
+
+ @example
+ ```
+ // data.json
+ {
+ 'foo': 1,
+ 'bar': 2,
+ 'biz': 3
+ }
+
+ // main.ts
+ import type {ValueOf} from 'type-fest';
+ import data = require('./data.json');
+
+ export function getData(name: string): ValueOf<typeof data> {
+ return data[name];
+ }
+
+ export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
+ return data[name];
+ }
+
+ // file.ts
+ import {getData, onlyBar} from './main';
+
+ getData('foo');
+ //=> 1
+
+ onlyBar('foo');
+ //=> TypeError ...
+
+ onlyBar('bar');
+ //=> 2
+ ```
+ * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
+ */
+ type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
+
+ type ToToolCall<TOOLS extends Record<string, ExperimentalTool>> = ValueOf<{
+ [NAME in keyof TOOLS]: {
+ toolCallId: string;
+ toolName: NAME & string;
+ args: z.infer<TOOLS[NAME]['parameters']>;
+ };
+ }>;
+ type ToToolCallArray<TOOLS extends Record<string, ExperimentalTool>> = Array<ToToolCall<TOOLS>>;
+
+ type ToToolsWithExecute<TOOLS extends Record<string, ExperimentalTool>> = {
+ [K in keyof TOOLS as TOOLS[K] extends {
+ execute: any;
+ } ? K : never]: TOOLS[K];
+ };
+ type ToToolsWithDefinedExecute<TOOLS extends Record<string, ExperimentalTool>> = {
+ [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
+ };
+ type ToToolResultObject<TOOLS extends Record<string, ExperimentalTool>> = ValueOf<{
+ [NAME in keyof TOOLS]: {
+ toolCallId: string;
+ toolName: NAME & string;
+ args: z.infer<TOOLS[NAME]['parameters']>;
+ result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
+ };
+ }>;
+ type ToToolResult<TOOLS extends Record<string, ExperimentalTool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
+ type ToToolResultArray<TOOLS extends Record<string, ExperimentalTool>> = Array<ToToolResult<TOOLS>>;
+
+ /**
+ Generate text and call tools for a given prompt using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `experimental_streamText` instead.
+
+ @param model - The language model to use.
+ @param tools - The tools that the model can call. The model needs to support calling tools.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns
+ A result object that contains the generated text, the results of the tool calls, and additional information.
+ */
+ declare function experimental_generateText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
+ model: LanguageModelV1;
+ /**
+ The tools that the model can call. The model needs to support calling tools.
+ */
+ tools?: TOOLS;
+ }): Promise<GenerateTextResult<TOOLS>>;
+ /**
+ The result of a `generateText` call.
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ */
+ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>> {
+ /**
+ The generated text.
+ */
+ readonly text: string;
+ /**
+ The tool calls that were made during the generation.
+ */
+ readonly toolCalls: ToToolCallArray<TOOLS>;
+ /**
+ The results of the tool calls.
+ */
+ readonly toolResults: ToToolResultArray<TOOLS>;
+ /**
+ The reason why the generation finished.
+ */
+ readonly finishReason: LanguageModelV1FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: TokenUsage;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ constructor(options: {
+ text: string;
+ toolCalls: ToToolCallArray<TOOLS>;
+ toolResults: ToToolResultArray<TOOLS>;
+ finishReason: LanguageModelV1FinishReason;
+ usage: TokenUsage;
+ warnings: LanguageModelV1CallWarning[] | undefined;
+ });
+ }
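Putting the pieces together, a sketch of a tool-calling generation (same `model` assumptions as before; the tool itself is hypothetical, as in the `tool()` sketch above):

```ts
import { experimental_generateText, tool } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from 'ai/spec'; // import path is an assumption
declare const model: LanguageModelV1; // any provider implementation

const result = await experimental_generateText({
  model,
  tools: {
    // hypothetical tool, mirroring the tool() sketch above
    weather: tool({
      description: 'Get the current temperature for a city.',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperatureCelsius: 21 }),
    }),
  },
  prompt: 'What is the weather in Berlin?',
});

// toolResults is typed per tool: `result` is the awaited return type of execute.
for (const toolResult of result.toolResults) {
  console.log(toolResult.toolName, toolResult.result.temperatureCelsius);
}
console.log(result.text, result.finishReason);
```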
+
+ /**
+ Generate text and call tools for a given prompt using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `experimental_generateText` instead.
+
+ @param model - The language model to use.
+ @param tools - The tools that the model can call. The model needs to support calling tools.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @return
+ A result object for accessing different stream types and additional information.
+ */
+ declare function experimental_streamText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
+ model: LanguageModelV1;
+ /**
+ The tools that the model can call. The model needs to support calling tools.
+ */
+ tools?: TOOLS;
+ }): Promise<StreamTextResult<TOOLS>>;
+ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
+ type: 'text-delta';
+ textDelta: string;
+ } | ({
+ type: 'tool-call';
+ } & ToToolCall<TOOLS>) | {
+ type: 'error';
+ error: unknown;
+ } | ({
+ type: 'tool-result';
+ } & ToToolResult<TOOLS>) | {
+ type: 'finish';
+ finishReason: LanguageModelV1FinishReason;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ };
+ /**
+ A result object for accessing different stream types and additional information.
+ */
+ declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
+ private readonly originalStream;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
+ constructor({ stream, warnings, }: {
+ stream: ReadableStream<TextStreamPart<TOOLS>>;
+ warnings: LanguageModelV1CallWarning[] | undefined;
+ });
+ /**
+ A text stream that returns only the generated text deltas. You can use it
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
+ stream will throw the error.
+ */
+ get textStream(): AsyncIterableStream<string>;
+ /**
+ A stream with all events, including text deltas, tool calls, tool results, and
+ errors.
+ You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
+ stream will throw the error.
+ */
+ get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
+ /**
+ Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+ It can be used with the `useChat` and `useCompletion` hooks.
+
+ @param callbacks
+ Stream callbacks that will be called when the stream emits events.
+
+ @returns an `AIStream` object.
+ */
+ toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+ }
+
  interface FunctionCall {
  /**
  * The arguments to call the function with, as generated by the model in JSON
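`toAIStream()` is the bridge from the new core API to the existing `useChat`/`useCompletion` protocol. A route-handler sketch (same `model` assumptions as above; alternatively, iterate `result.textStream` directly, but consume only one of the streams):

```ts
import { experimental_streamText, StreamingTextResponse } from 'ai';
import type { LanguageModelV1 } from 'ai/spec'; // import path is an assumption
declare const model: LanguageModelV1; // any provider implementation

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const result = await experimental_streamText({ model, prompt });

  // Adapts the text-delta stream to the existing streaming response protocol.
  return new StreamingTextResponse(result.toAIStream());
}
```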
@@ -342,7 +1333,11 @@ declare const StreamStringPrefixes: {
  readonly message_annotations: "8";
  };

- declare const nanoid: (size?: number | undefined) => string;
+ /**
+ * Generates a 7-character random string to use for IDs. Not secure.
+ */
+ declare const generateId: (size?: number | undefined) => string;
+
  declare function createChunkDecoder(): (chunk: Uint8Array | undefined) => string;
  declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefined) => string;
  declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
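This hunk renames `nanoid` to `generateId`; the export hunk at the end of the file keeps the old name as an alias (`generateId as nanoid`), so existing imports continue to work. Usage is unchanged apart from the name:

```ts
import { generateId } from 'ai';

const id = generateId(); // 7-character random string (not cryptographically secure)
const longer = generateId(16); // optional size argument
```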
@@ -817,6 +1812,7 @@ interface AWSBedrockResponse {
  };
  }>;
  }
+ declare function AWSBedrockAnthropicMessagesStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
  declare function AWSBedrockAnthropicStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
  declare function AWSBedrockCohereStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
  declare function AWSBedrockLlama2Stream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
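The new `AWSBedrockAnthropicMessagesStream` mirrors the existing Bedrock helpers but parses the Anthropic Messages API chunk format rather than the older prompt/completion format handled by `AWSBedrockAnthropicStream`. A sketch of the likely wiring, assuming the usual `@aws-sdk/client-bedrock-runtime` invocation (the request body follows Anthropic's Messages API; model ID and details are illustrative):

```ts
import {
  BedrockRuntimeClient,
  InvokeModelWithResponseStreamCommand,
} from '@aws-sdk/client-bedrock-runtime';
import { AWSBedrockAnthropicMessagesStream, StreamingTextResponse } from 'ai';

const client = new BedrockRuntimeClient({ region: 'us-east-1' });

export async function POST(req: Request) {
  const { messages } = await req.json();

  // Anthropic Messages API request body (illustrative model ID).
  const response = await client.send(
    new InvokeModelWithResponseStreamCommand({
      modelId: 'anthropic.claude-3-haiku-20240307-v1:0',
      contentType: 'application/json',
      accept: 'application/json',
      body: JSON.stringify({
        anthropic_version: 'bedrock-2023-05-31',
        max_tokens: 1024,
        messages,
      }),
    }),
  );

  // The SDK response body matches the AWSBedrockResponse shape above.
  const stream = AWSBedrockAnthropicMessagesStream(response);
  return new StreamingTextResponse(stream);
}
```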
@@ -998,4 +1994,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantMessage, COMPLEX_HEADER, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataMessage, Function, FunctionCall, FunctionCallHandler, FunctionCallPayload, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamString, StreamingTextResponse, Tool, ToolCall, ToolCallHandler, ToolCallPayload, ToolChoice, UseChatOptions, UseCompletionOptions, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, isStreamStringEqualToType, nanoid, readableFromAsyncIterable, streamToResponse, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, COMPLEX_HEADER, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ErrorStreamPart, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamObjectResult, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, isStreamStringEqualToType, generateId as nanoid, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };