ai 3.0.14 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -198,6 +198,10 @@ type LanguageModelV1FunctionToolCall = {
  args: string;
  };

+ /**
+ * Experimental: Specification for a language model that implements the language model
+ * interface version 1.
+ */
  type LanguageModelV1 = {
  /**
  * The language model must specify which language model interface
@@ -328,140 +332,275 @@ type TokenUsage = {

  type CallSettings = {
  /**
- * Maximum number of tokens to generate.
+ Maximum number of tokens to generate.
  */
  maxTokens?: number;
  /**
- * Temperature setting. This is a number between 0 (almost no randomness) and
- * 1 (very random).
- *
- * It is recommended to set either `temperature` or `topP`, but not both.
- *
- * @default 0
+ Temperature setting. This is a number between 0 (almost no randomness) and
+ 1 (very random).
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+
+ @default 0
  */
  temperature?: number;
  /**
- * Nucleus sampling. This is a number between 0 and 1.
- *
- * E.g. 0.1 would mean that only tokens with the top 10% probability mass
- * are considered.
- *
- * It is recommended to set either `temperature` or `topP`, but not both.
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
  topP?: number;
  /**
- * Presence penalty setting. It affects the likelihood of the model to
- * repeat information that is already in the prompt.
- *
- * The presence penalty is a number between -1 (increase repetition)
- * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
- *
- * @default 0
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
  */
  presencePenalty?: number;
  /**
- * Frequency penalty setting. It affects the likelihood of the model
- * to repeatedly use the same words or phrases.
- *
- * The frequency penalty is a number between -1 (increase repetition)
- * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
- *
- * @default 0
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
  */
  frequencyPenalty?: number;
  /**
- * The seed (integer) to use for random sampling. If set and supported
- * by the model, calls will generate deterministic results.
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
  */
  seed?: number;
  /**
- * Maximum number of retries. Set to 0 to disable retries.
- *
- * @default 2
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
  */
  maxRetries?: number;
  /**
- * Abort signal.
+ Abort signal.
  */
  abortSignal?: AbortSignal;
  };

  /**
- * Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
  */
  type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+ /**
+ Converts data content to a base64-encoded string.
+
+ @param content - Data content to convert.
+ @returns Base64-encoded string.
+ */
  declare function convertDataContentToBase64String(content: DataContent): string;
+ /**
+ Converts data content to a Uint8Array.
+
+ @param content - Data content to convert.
+ @returns Uint8Array.
+ */
  declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;

+ /**
+ Text content part of a prompt. It contains a string of text.
+ */
  interface TextPart$1 {
  type: 'text';
  /**
- * The text content.
+ The text content.
  */
  text: string;
  }
+ /**
+ Image content part of a prompt. It contains an image.
+ */
  interface ImagePart {
  type: 'image';
  /**
- * Image data. Can either be:
- *
- * - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
- * - URL: a URL that points to the image
+ Image data. Can either be:
+
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ - URL: a URL that points to the image
  */
  image: DataContent | URL;
  /**
- * Optional mime type of the image.
+ Optional mime type of the image.
  */
  mimeType?: string;
  }
+ /**
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
  interface ToolCallPart {
  type: 'tool-call';
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
  toolCallId: string;
+ /**
+ Name of the tool that is being called.
+ */
  toolName: string;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
  args: unknown;
  }
+ /**
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
  interface ToolResultPart {
  type: 'tool-result';
+ /**
+ ID of the tool call that this result is associated with.
+ */
  toolCallId: string;
+ /**
+ Name of the tool that generated this result.
+ */
  toolName: string;
+ /**
+ Result of the tool call. This is a JSON-serializable object.
+ */
  result: unknown;
  }

+ /**
+ A message that can be used in the `messages` field of a prompt.
+ It can be a user message, an assistant message, or a tool message.
+ */
  type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
+ /**
+ A user message. It can contain text or a combination of text and images.
+ */
  type ExperimentalUserMessage = {
  role: 'user';
  content: UserContent;
  };
+ /**
+ Content of a user message. It can be a string or an array of text and image parts.
+ */
+ type UserContent = string | Array<TextPart$1 | ImagePart>;
+ /**
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
  type ExperimentalAssistantMessage = {
  role: 'assistant';
  content: AssistantContent;
  };
+ /**
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
+ type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
+ /**
+ A tool message. It contains the result of one or more tool calls.
+ */
  type ExperimentalToolMessage = {
  role: 'tool';
  content: ToolContent;
  };
- type UserContent = string | Array<TextPart$1 | ImagePart>;
- type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
+ /**
+ Content of a tool message. It is an array of tool result parts.
+ */
  type ToolContent = Array<ToolResultPart>;

+ /**
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
  type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
  system?: string;
+ /**
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
+ */
  prompt?: string;
+ /**
+ A list of messages. You can either use `prompt` or `messages` but not both.
+ */
  messages?: Array<ExperimentalMessage>;
  };

  /**
- * Generate a structured, typed object using a language model.
+ Generate a structured, typed object for a given prompt and schema using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
+
+ @param model - The language model to use.
+ @param schema - The schema of the object that the model should generate.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
  declare function experimental_generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
  model: LanguageModelV1;
+ /**
+ The schema of the object that the model should generate.
+ */
  schema: z.Schema<T>;
+ /**
+ The mode to use for object generation. Not all models support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<GenerateObjectResult<T>>;
+ /**
+ The result of a `generateObject` call.
+ */
  declare class GenerateObjectResult<T> {
+ /**
+ The generated object (typed according to the schema).
+ */
  readonly object: T;
+ /**
+ The reason why the generation finished.
+ */
  readonly finishReason: LanguageModelV1FinishReason;
+ /**
+ The token usage of the generated text.
+ */
  readonly usage: TokenUsage;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
  constructor(options: {
  object: T;
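
A minimal usage sketch for the `experimental_generateObject` signature documented in this hunk, assuming the `ai` and `zod` packages are installed; the `model` placeholder stands in for any `LanguageModelV1` implementation (no concrete provider ships in this diff):

```ts
import { experimental_generateObject } from 'ai';
import { z } from 'zod';

// Placeholder: any LanguageModelV1 implementation works here.
declare const model: Parameters<typeof experimental_generateObject>[0]['model'];

const recipeSchema = z.object({
  name: z.string(),
  ingredients: z.array(z.string()),
});

async function main() {
  const result = await experimental_generateObject({
    model,
    schema: recipeSchema,
    mode: 'auto', // default and recommended, per the docs above
    prompt: 'Generate a simple pasta recipe.',
  });

  // GenerateObjectResult<T>: typed object plus generation metadata.
  console.log(result.object.name, result.finishReason, result.usage);
}

main().catch(console.error);
```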
@@ -490,15 +629,65 @@ type PartialObject<ObjectType extends object> = {
  };

  /**
- * Stream an object as a partial object stream.
+ Generate a structured, typed object for a given prompt and schema using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
+
+ @param model - The language model to use.
+ @param schema - The schema of the object that the model should generate.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @return
+ A result object for accessing the partial object stream and additional information.
  */
  declare function experimental_streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
  model: LanguageModelV1;
+ /**
+ The schema of the object that the model should generate.
+ */
  schema: z.Schema<T>;
+ /**
+ The mode to use for object generation. Not all models support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
  mode?: 'auto' | 'json' | 'tool' | 'grammar';
  }): Promise<StreamObjectResult<T>>;
+ /**
+ The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
  declare class StreamObjectResult<T> {
  private readonly originalStream;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
  constructor({ stream, warnings, }: {
  stream: ReadableStream<string | ErrorStreamPart>;
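
A sketch of the streaming counterpart; note that the `partialObjectStream` accessor used below is assumed from the ai 3.0 `StreamObjectResult` API, since its declaration falls outside the quoted hunk:

```ts
import { experimental_streamObject } from 'ai';
import { z } from 'zod';

// Placeholder: any LanguageModelV1 implementation works here.
declare const model: Parameters<typeof experimental_streamObject>[0]['model'];

async function main() {
  const result = await experimental_streamObject({
    model,
    schema: z.object({ city: z.string(), country: z.string() }),
    prompt: 'Name a city and the country it is in.',
  });

  // Partial objects arrive while the model streams, e.g.
  // {} -> { city: 'Par' } -> { city: 'Paris', country: 'France' }.
  for await (const partialObject of result.partialObjectStream) {
    console.log(partialObject);
  }
}

main().catch(console.error);
```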
@@ -512,29 +701,29 @@ type ErrorStreamPart = {
  };

  /**
- * A tool contains the description and the schema of the input that the tool expects.
- * This enables the language model to generate the input.
- *
- * The tool can also contain an optional execute function for the actual execution function of the tool.
+ A tool contains the description and the schema of the input that the tool expects.
+ This enables the language model to generate the input.
+
+ The tool can also contain an optional execute function for the actual execution function of the tool.
  */
  interface ExperimentalTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
  /**
- * A optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+ An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
  */
  description?: string;
  /**
- * The schema of the input that the tool expects. The language model will use this to generate the input.
- * Use descriptions to make the input understandable for the language model.
+ The schema of the input that the tool expects. The language model will use this to generate the input.
+ Use descriptions to make the input understandable for the language model.
  */
  parameters: PARAMETERS;
  /**
- * An optional execute function for the actual execution function of the tool.
- * If not provided, the tool will not be executed automatically.
+ An optional execute function for the actual execution function of the tool.
+ If not provided, the tool will not be executed automatically.
  */
  execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
  }
  /**
- * Helper function for inferring the execute args of a tool.
+ Helper function for inferring the execute args of a tool.
  */
  declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: ExperimentalTool<PARAMETERS, RESULT> & {
  execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
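
A sketch of the `tool` helper in use: the zod schema in `parameters` types the `args` of `execute`, which is the inference the helper exists for (the weather lookup itself is hypothetical):

```ts
import { tool } from 'ai';
import { z } from 'zod';

const weatherTool = tool({
  description: 'Get the current temperature for a city.',
  parameters: z.object({
    city: z.string().describe('The city to look up.'),
  }),
  // `args` is inferred as { city: string } from the parameters schema.
  execute: async (args) => {
    // Hypothetical implementation; a real tool would call a weather API.
    return { city: args.city, temperatureCelsius: 21 };
  },
});
```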
@@ -618,18 +807,79 @@ type ToToolResult<TOOLS extends Record<string, ExperimentalTool>> = ToToolResult
  type ToToolResultArray<TOOLS extends Record<string, ExperimentalTool>> = Array<ToToolResult<TOOLS>>;

  /**
- * Generate a text and call tools using a language model.
+ Generate a text and call tools for a given prompt using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `experimental_streamText` instead.
+
+ @param model - The language model to use.
+ @param tools - The tools that the model can call. The model needs to support calling tools.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns
+ A result object that contains the generated text, the results of the tool calls, and additional information.
  */
  declare function experimental_generateText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
  model: LanguageModelV1;
+ /**
+ The tools that the model can call. The model needs to support calling tools.
+ */
  tools?: TOOLS;
  }): Promise<GenerateTextResult<TOOLS>>;
+ /**
+ The result of a `generateText` call.
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ */
  declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>> {
+ /**
+ The generated text.
+ */
  readonly text: string;
+ /**
+ The tool calls that were made during the generation.
+ */
  readonly toolCalls: ToToolCallArray<TOOLS>;
+ /**
+ The results of the tool calls.
+ */
  readonly toolResults: ToToolResultArray<TOOLS>;
+ /**
+ The reason why the generation finished.
+ */
  readonly finishReason: LanguageModelV1FinishReason;
+ /**
+ The token usage of the generated text.
+ */
  readonly usage: TokenUsage;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
  constructor(options: {
  text: string;
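
Putting the pieces together, a sketch of a non-streaming `experimental_generateText` call with one tool; per the docs above, tools that define `execute` run automatically and their results land in `toolResults` (the weather tool is hypothetical):

```ts
import { experimental_generateText, tool } from 'ai';
import { z } from 'zod';

// Placeholder: any LanguageModelV1 implementation that supports tool calls.
declare const model: Parameters<typeof experimental_generateText>[0]['model'];

async function main() {
  const { text, toolCalls, toolResults, finishReason } =
    await experimental_generateText({
      model,
      tools: {
        weather: tool({
          description: 'Get the current temperature for a city.',
          parameters: z.object({ city: z.string() }),
          execute: async ({ city }) => ({ city, temperatureCelsius: 21 }),
        }),
      },
      prompt: 'What is the weather in Berlin?',
    });

  console.log(text, toolCalls, toolResults, finishReason);
}

main().catch(console.error);
```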
@@ -642,10 +892,49 @@ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>>
  }

  /**
- * Stream text generated by a language model.
+ Generate a text and call tools for a given prompt using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `experimental_generateText` instead.
+
+ @param model - The language model to use.
+ @param tools - The tools that the model can call. The model needs to support calling tools.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ This is a number between 0 (almost no randomness) and 1 (very random).
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+ 0 means no penalty.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @return
+ A result object for accessing different stream types and additional information.
  */
  declare function experimental_streamText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ /**
+ The language model to use.
+ */
  model: LanguageModelV1;
+ /**
+ The tools that the model can call. The model needs to support calling tools.
+ */
  tools?: TOOLS;
  }): Promise<StreamTextResult<TOOLS>>;
  type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
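
A sketch of the streaming call; `textStream` (documented in the next hunk) is consumed here as an AsyncIterable of text deltas:

```ts
import { experimental_streamText } from 'ai';

// Placeholder: any LanguageModelV1 implementation works here.
declare const model: Parameters<typeof experimental_streamText>[0]['model'];

async function main() {
  const result = await experimental_streamText({
    model,
    prompt: 'Write a haiku about version diffs.',
  });

  // Text deltas arrive as the model generates them.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}

main().catch(console.error);
```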
@@ -667,15 +956,41 @@ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
  totalTokens: number;
  };
  };
+ /**
+ A result object for accessing different stream types and additional information.
+ */
  declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
  private readonly originalStream;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
  readonly warnings: LanguageModelV1CallWarning[] | undefined;
  constructor({ stream, warnings, }: {
  stream: ReadableStream<TextStreamPart<TOOLS>>;
  warnings: LanguageModelV1CallWarning[] | undefined;
  });
+ /**
+ A text stream that returns only the generated text deltas. You can use it
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
+ stream will throw the error.
+ */
  get textStream(): AsyncIterableStream<string>;
+ /**
+ A stream with all events, including text deltas, tool calls, tool results, and
+ errors.
+ You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
+ stream will throw the error.
+ */
  get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
+ /**
+ Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+ It can be used with the `useChat` and `useCompletion` hooks.
+
+ @param callbacks
+ Stream callbacks that will be called when the stream emits events.
+
+ @returns an `AIStream` object.
+ */
  toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
  }
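
Finally, a sketch of the `toAIStream` integration described above, placed in a Next.js-style route handler; the route scaffolding is an assumption, not part of this diff, while `StreamingTextResponse` is the existing `ai` helper the new docs reference:

```ts
import { experimental_streamText, StreamingTextResponse } from 'ai';

// Placeholder: any LanguageModelV1 implementation works here.
declare const model: Parameters<typeof experimental_streamText>[0]['model'];

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = await experimental_streamText({ model, messages });

  // toAIStream produces a stream that StreamingTextResponse can send,
  // which the useChat / useCompletion hooks consume on the client.
  return new StreamingTextResponse(result.toAIStream());
}
```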