ai 3.0.14 → 3.0.16
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
- package/dist/index.d.mts +375 -56
- package/dist/index.d.ts +375 -56
- package/dist/index.js +195 -135
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +195 -135
- package/dist/index.mjs.map +1 -1
- package/mistral/dist/index.d.mts +4 -1
- package/mistral/dist/index.d.ts +4 -1
- package/mistral/dist/index.js +15 -15
- package/mistral/dist/index.js.map +1 -1
- package/mistral/dist/index.mjs +15 -15
- package/mistral/dist/index.mjs.map +1 -1
- package/openai/dist/index.d.mts +4 -0
- package/openai/dist/index.d.ts +4 -0
- package/openai/dist/index.js +19 -19
- package/openai/dist/index.js.map +1 -1
- package/openai/dist/index.mjs +19 -19
- package/openai/dist/index.mjs.map +1 -1
- package/package.json +8 -8
- package/rsc/dist/index.d.ts +21 -3
- package/rsc/dist/rsc-client.d.mts +1 -1
- package/rsc/dist/rsc-client.mjs +2 -0
- package/rsc/dist/rsc-client.mjs.map +1 -1
- package/rsc/dist/rsc-server.d.mts +2 -2
- package/rsc/dist/rsc-server.mjs +1 -1
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/rsc/dist/rsc-shared.d.mts +20 -2
- package/rsc/dist/rsc-shared.mjs +75 -2
- package/rsc/dist/rsc-shared.mjs.map +1 -1
- package/{ai-model-specification → spec}/dist/index.d.mts +4 -0
- package/{ai-model-specification → spec}/dist/index.d.ts +4 -0
- package/{ai-model-specification → spec}/dist/index.js +29 -29
- package/{ai-model-specification → spec}/dist/index.mjs +25 -25
- /package/{ai-model-specification → spec}/dist/index.js.map +0 -0
- /package/{ai-model-specification → spec}/dist/index.mjs.map +0 -0
package/dist/index.d.mts
CHANGED
@@ -198,6 +198,10 @@ type LanguageModelV1FunctionToolCall = {
     args: string;
 };
 
+/**
+ * Experimental: Specification for a language model that implements the language model
+ * interface version 1.
+ */
 type LanguageModelV1 = {
     /**
      * The language model must specify which language model interface
@@ -328,140 +332,277 @@ type TokenUsage = {
 
 type CallSettings = {
     /**
-
+    Maximum number of tokens to generate.
      */
     maxTokens?: number;
     /**
-
-
-
-
-
-
+    Temperature setting. This is a number between 0 (almost no randomness) and
+    1 (very random).
+
+    It is recommended to set either `temperature` or `topP`, but not both.
+
+    @default 0
      */
     temperature?: number;
     /**
-
-
-
-
-
-
+    Nucleus sampling. This is a number between 0 and 1.
+
+    E.g. 0.1 would mean that only tokens with the top 10% probability mass
+    are considered.
+
+    It is recommended to set either `temperature` or `topP`, but not both.
      */
     topP?: number;
     /**
-
-
-
-
-
-
-
+    Presence penalty setting. It affects the likelihood of the model to
+    repeat information that is already in the prompt.
+
+    The presence penalty is a number between -1 (increase repetition)
+    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+    @default 0
      */
     presencePenalty?: number;
     /**
-
-
-
-
-
-
-
+    Frequency penalty setting. It affects the likelihood of the model
+    to repeatedly use the same words or phrases.
+
+    The frequency penalty is a number between -1 (increase repetition)
+    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+    @default 0
      */
     frequencyPenalty?: number;
     /**
-
-
+    The seed (integer) to use for random sampling. If set and supported
+    by the model, calls will generate deterministic results.
      */
     seed?: number;
     /**
-
-
-
+    Maximum number of retries. Set to 0 to disable retries.
+
+    @default 2
      */
     maxRetries?: number;
     /**
-
+    Abort signal.
      */
     abortSignal?: AbortSignal;
 };
 
 /**
-
+Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
  */
 type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+/**
+Converts data content to a base64-encoded string.
+
+@param content - Data content to convert.
+@returns Base64-encoded string.
+ */
 declare function convertDataContentToBase64String(content: DataContent): string;
+/**
+Converts data content to a Uint8Array.
+
+@param content - Data content to convert.
+@returns Uint8Array.
+ */
 declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;
 
+/**
+Text content part of a prompt. It contains a string of text.
+ */
 interface TextPart$1 {
     type: 'text';
     /**
-
+    The text content.
      */
     text: string;
 }
+/**
+Image content part of a prompt. It contains an image.
+ */
 interface ImagePart {
     type: 'image';
     /**
-
-
-
-
+    Image data. Can either be:
+
+    - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+    - URL: a URL that points to the image
      */
     image: DataContent | URL;
     /**
-
+    Optional mime type of the image.
      */
     mimeType?: string;
 }
+/**
+Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
 interface ToolCallPart {
     type: 'tool-call';
+    /**
+    ID of the tool call. This ID is used to match the tool call with the tool result.
+     */
     toolCallId: string;
+    /**
+    Name of the tool that is being called.
+     */
     toolName: string;
+    /**
+    Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+     */
     args: unknown;
 }
+/**
+Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
 interface ToolResultPart {
     type: 'tool-result';
+    /**
+    ID of the tool call that this result is associated with.
+     */
    toolCallId: string;
+    /**
+    Name of the tool that generated this result.
+     */
     toolName: string;
+    /**
+    Result of the tool call. This is a JSON-serializable object.
+     */
     result: unknown;
 }
 
+/**
+A message that can be used in the `messages` field of a prompt.
+It can be a user message, an assistant message, or a tool message.
+ */
 type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
+/**
+A user message. It can contain text or a combination of text and images.
+ */
 type ExperimentalUserMessage = {
     role: 'user';
     content: UserContent;
 };
+/**
+Content of a user message. It can be a string or an array of text and image parts.
+ */
+type UserContent = string | Array<TextPart$1 | ImagePart>;
+/**
+An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
 type ExperimentalAssistantMessage = {
     role: 'assistant';
     content: AssistantContent;
 };
+/**
+Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
+type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
+/**
+A tool message. It contains the result of one or more tool calls.
+ */
 type ExperimentalToolMessage = {
     role: 'tool';
     content: ToolContent;
 };
-
-
+/**
+Content of a tool message. It is an array of tool result parts.
+ */
 type ToolContent = Array<ToolResultPart>;
 
+/**
+Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
 type Prompt = {
+    /**
+    System message to include in the prompt. Can be used with `prompt` or `messages`.
+     */
     system?: string;
+    /**
+    A simple text prompt. You can either use `prompt` or `messages` but not both.
+     */
     prompt?: string;
+    /**
+    A list of messsages. You can either use `prompt` or `messages` but not both.
+     */
     messages?: Array<ExperimentalMessage>;
 };
 
 /**
-
+Generate a structured, typed object for a given prompt and schema using a language model.
+
+This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
+
+@param model - The language model to use.
+
+@param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@returns
+A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
 declare function experimental_generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+     */
     model: LanguageModelV1;
+    /**
+    The schema of the object that the model should generate.
+     */
     schema: z.Schema<T>;
+    /**
+    The mode to use for object generation. Not all models support all modes.
+
+    Default and recommended: 'auto' (best mode for the model).
+     */
     mode?: 'auto' | 'json' | 'tool' | 'grammar';
 }): Promise<GenerateObjectResult<T>>;
+/**
+The result of a `generateObject` call.
+ */
 declare class GenerateObjectResult<T> {
+    /**
+    The generated object (typed according to the schema).
+     */
     readonly object: T;
+    /**
+    The reason why the generation finished.
+     */
     readonly finishReason: LanguageModelV1FinishReason;
+    /**
+    The token usage of the generated text.
+     */
     readonly usage: TokenUsage;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
     readonly warnings: LanguageModelV1CallWarning[] | undefined;
     constructor(options: {
         object: T;
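Note: the hunk above introduces the multi-modal prompt message types (text and image parts) and the documented `experimental_generateObject` call. A minimal usage sketch, not taken from the package — provider setup is elided, `model` stands for any LanguageModelV1 implementation, and the schema and prompt are hypothetical:

import { experimental_generateObject } from 'ai';
import { z } from 'zod';

// Hypothetical: describe an image as a typed object.
export async function describeImage(model: any, imageUrl: string) {
  const { object, finishReason, usage } = await experimental_generateObject({
    model, // any LanguageModelV1 instance; provider setup elided
    mode: 'auto', // default; not all models support all modes
    schema: z.object({
      description: z.string(),
      objects: z.array(z.string()),
    }),
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Describe this image.' },
          { type: 'image', image: new URL(imageUrl) }, // ImagePart via URL
        ],
      },
    ],
  });
  // `object` is typed as { description: string; objects: string[] }.
  return { object, finishReason, usage };
}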
@@ -490,15 +631,67 @@ type PartialObject<ObjectType extends object> = {
 };
 
 /**
-
+Generate a structured, typed object for a given prompt and schema using a language model.
+
+This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
+
+@param model - The language model to use.
+
+@param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@return
+A result object for accessing the partial object stream and additional information.
  */
 declare function experimental_streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+     */
     model: LanguageModelV1;
+    /**
+    The schema of the object that the model should generate.
+     */
     schema: z.Schema<T>;
+    /**
+    The mode to use for object generation. Not all models support all modes.
+
+    Default and recommended: 'auto' (best mode for the model).
+     */
     mode?: 'auto' | 'json' | 'tool' | 'grammar';
 }): Promise<StreamObjectResult<T>>;
+/**
+The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
 declare class StreamObjectResult<T> {
     private readonly originalStream;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
     readonly warnings: LanguageModelV1CallWarning[] | undefined;
     constructor({ stream, warnings, }: {
         stream: ReadableStream<string | ErrorStreamPart>;
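Note: a minimal sketch of consuming the streamed object. The `partialObjectStream` accessor does not appear in this hunk and is assumed from the unchanged remainder of the declaration file; the schema and prompt are hypothetical:

import { experimental_streamObject } from 'ai';
import { z } from 'zod';

export async function streamBooks(model: any) {
  const result = await experimental_streamObject({
    model, // any LanguageModelV1 instance; provider setup elided
    schema: z.object({
      books: z.array(z.object({ title: z.string(), author: z.string() })),
    }),
    prompt: 'Recommend three science fiction books.',
  });

  // Assumed accessor: yields progressively deeper partial objects
  // as tokens arrive, until the full object is complete.
  for await (const partialObject of result.partialObjectStream) {
    console.log(partialObject);
  }
}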
@@ -512,29 +705,29 @@ type ErrorStreamPart = {
 };
 
 /**
-
-
-
-
+A tool contains the description and the schema of the input that the tool expects.
+This enables the language model to generate the input.
+
+The tool can also contain an optional execute function for the actual execution function of the tool.
  */
 interface ExperimentalTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
     /**
-
+    An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
      */
     description?: string;
     /**
-
-
+    The schema of the input that the tool expects. The language model will use this to generate the input.
+    Use descriptions to make the input understandable for the language model.
      */
     parameters: PARAMETERS;
     /**
-
-
+    An optional execute function for the actual execution function of the tool.
+    If not provided, the tool will not be executed automatically.
      */
     execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
 }
 /**
-
+Helper function for inferring the execute args of a tool.
  */
 declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: ExperimentalTool<PARAMETERS, RESULT> & {
     execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
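Note: the `tool` helper exists purely for type inference — it ties the `args` parameter of `execute` to `z.infer` of the `parameters` schema. A minimal sketch with a hypothetical weather tool and a mocked result:

import { tool } from 'ai';
import { z } from 'zod';

const weather = tool({
  description: 'Get the current weather for a city.',
  parameters: z.object({
    city: z.string().describe('Name of the city'),
  }),
  // `city` is inferred as string from the zod schema above.
  execute: async ({ city }) => ({ city, temperatureCelsius: 21 }), // mocked
});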
@@ -618,18 +811,79 @@ type ToToolResult<TOOLS extends Record<string, ExperimentalTool>> = ToToolResult
 type ToToolResultArray<TOOLS extends Record<string, ExperimentalTool>> = Array<ToToolResult<TOOLS>>;
 
 /**
-
+Generate a text and call tools for a given prompt using a language model.
+
+This function does not stream the output. If you want to stream the output, use `experimental_streamText` instead.
+
+@param model - The language model to use.
+@param tools - The tools that the model can call. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@returns
+A result object that contains the generated text, the results of the tool calls, and additional information.
  */
 declare function experimental_generateText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+     */
     model: LanguageModelV1;
+    /**
+    The tools that the model can call. The model needs to support calling tools.
+     */
     tools?: TOOLS;
 }): Promise<GenerateTextResult<TOOLS>>;
+/**
+The result of a `generateText` call.
+It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ */
 declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>> {
+    /**
+    The generated text.
+     */
     readonly text: string;
+    /**
+    The tool calls that were made during the generation.
+     */
     readonly toolCalls: ToToolCallArray<TOOLS>;
+    /**
+    The results of the tool calls.
+     */
     readonly toolResults: ToToolResultArray<TOOLS>;
+    /**
+    The reason why the generation finished.
+     */
     readonly finishReason: LanguageModelV1FinishReason;
+    /**
+    The token usage of the generated text.
+     */
     readonly usage: TokenUsage;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
     readonly warnings: LanguageModelV1CallWarning[] | undefined;
     constructor(options: {
         text: string;
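Note: a minimal sketch of `experimental_generateText` with a hypothetical tool. Because the tool defines `execute`, its calls are run automatically and surface in `toolResults`:

import { experimental_generateText, tool } from 'ai';
import { z } from 'zod';

export async function askWeather(model: any) {
  const { text, toolCalls, toolResults, finishReason } =
    await experimental_generateText({
      model, // any LanguageModelV1 instance; provider setup elided
      tools: {
        weather: tool({
          description: 'Get the current weather for a city.',
          parameters: z.object({ city: z.string() }),
          execute: async ({ city }) => ({ city, temperatureCelsius: 21 }), // mocked
        }),
      },
      prompt: 'What is the weather in Berlin?',
    });
  return { text, toolCalls, toolResults, finishReason };
}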
@@ -642,10 +896,49 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
 }
 
 /**
-
+Generate a text and call tools for a given prompt using a language model.
+
+This function streams the output. If you do not want to stream the output, use `experimental_generateText` instead.
+
+@param model - The language model to use.
+@param tools - The tools that the model can call. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@return
+A result object for accessing different stream types and additional information.
  */
 declare function experimental_streamText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+     */
     model: LanguageModelV1;
+    /**
+    The tools that the model can call. The model needs to support calling tools.
+     */
     tools?: TOOLS;
 }): Promise<StreamTextResult<TOOLS>>;
 type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
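Note: a minimal sketch of `experimental_streamText`; `textStream` (documented in the next hunk) is consumable as an AsyncIterable. The prompt and settings are hypothetical:

import { experimental_streamText } from 'ai';

export async function streamStory(model: any) {
  const result = await experimental_streamText({
    model, // any LanguageModelV1 instance; provider setup elided
    maxTokens: 512,
    temperature: 0.3,
    prompt: 'Tell a short story about a lighthouse keeper.',
  });

  // Print text deltas as they arrive.
  for await (const textDelta of result.textStream) {
    process.stdout.write(textDelta);
  }
}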
@@ -667,15 +960,41 @@ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
         totalTokens: number;
     };
 };
+/**
+A result object for accessing different stream types and additional information.
+ */
 declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
     private readonly originalStream;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
     readonly warnings: LanguageModelV1CallWarning[] | undefined;
     constructor({ stream, warnings, }: {
         stream: ReadableStream<TextStreamPart<TOOLS>>;
         warnings: LanguageModelV1CallWarning[] | undefined;
     });
+    /**
+    A text stream that returns only the generated text deltas. You can use it
+    as either an AsyncIterable or a ReadableStream. When an error occurs, the
+    stream will throw the error.
+     */
     get textStream(): AsyncIterableStream<string>;
+    /**
+    A stream with all events, including text deltas, tool calls, tool results, and
+    errors.
+    You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
+    stream will throw the error.
+     */
     get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
+    /**
+    Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+    It can be used with the `useChat` and `useCompletion` hooks.
+
+    @param callbacks
+    Stream callbacks that will be called when the stream emits events.
+
+    @returns an `AIStream` object.
+     */
     toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
 }
 
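Note: `toAIStream` bridges the new core API to the existing `useChat`/`useCompletion` hooks. A hypothetical route-handler sketch (Next.js App Router shape; assumes the incoming chat messages are plain text and therefore structurally compatible with `ExperimentalMessage`):

import { experimental_streamText, StreamingTextResponse } from 'ai';

declare const model: any; // stand-in for a LanguageModelV1 provider instance

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = await experimental_streamText({ model, messages });

  // The AIStream is compatible with StreamingTextResponse and the client hooks.
  return new StreamingTextResponse(result.toAIStream());
}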