ai 5.0.0-canary.20 → 5.0.0-canary.21

This diff compares the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
@@ -288,15 +288,11 @@ type CallSettings = {
    */
   maxOutputTokens?: number;
   /**
-  Temperature setting. This is a number between 0 (almost no randomness) and
-  1 (very random).
+  Temperature setting. The range depends on the provider and model.
 
   It is recommended to set either `temperature` or `topP`, but not both.
-  Use `null` to use the provider's default temperature.
-
-  @default 0
    */
-  temperature?: number | null;
+  temperature?: number;
   /**
   Nucleus sampling. This is a number between 0 and 1.
 
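Note on the hunk above: `temperature` is narrowed from `number | null` to `number`, and the documented default of 0 is removed, so the provider default now applies whenever the setting is omitted. A minimal sketch of how caller code adapts, assuming the package's public `generateText` API and a hypothetical provider and model setup (not part of this diff):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package, for illustration only

// canary.20: `temperature: null` opted into the provider default, and omitting
// the setting applied the SDK default of 0.
// canary.21: `null` is no longer accepted; omit `temperature` (or pass a number)
// and the provider's own default is used.
const { text } = await generateText({
  model: openai('gpt-4o-mini'), // hypothetical model id
  prompt: 'Write a haiku about package diffs.',
  // temperature: 0.7, // optional; leave out to defer to the provider
});
console.log(text);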
@@ -357,13 +353,6 @@ type CallSettings = {
   headers?: Record<string, string | undefined>;
 };
 
-/**
- * Validates call settings and sets default values.
- */
-declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, stopSequences, seed, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries' | 'temperature'> & {
-    temperature?: number;
-};
-
 /**
 Tool choice for the generation. It supports the following settings:
 
@@ -458,6 +447,11 @@ declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolC
   toolChoice: LanguageModelV2ToolChoice | undefined;
 };
 
+/**
+ * Validates call settings and returns a new object with limited values.
+ */
+declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, seed, stopSequences, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>;
+
 type RetryFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => PromiseLike<OUTPUT>;
 
 /**
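The relocated `prepareCallSettings` declaration above also changes its return type: it no longer strips and re-adds `temperature`, because with `null` removed from `CallSettings` and no default applied, the input shape can be returned unchanged. A type-only sketch of the difference, using stand-in types rather than the package's real `CallSettings`:

// Stand-ins reduced to the relevant fields; hypothetical, for illustration only.
type CallSettingsBefore = { temperature?: number | null; topP?: number };
type CallSettingsAfter = { temperature?: number; topP?: number };

// canary.20: the helper defaulted temperature to 0, so its return type had to
// drop the `null` variant and re-declare `temperature` as a plain number.
type PreparedBefore = Omit<CallSettingsBefore, 'temperature'> & { temperature?: number };

// canary.21: no default is applied and `null` no longer exists on the setting,
// so the prepared settings keep the same shape as the input.
type PreparedAfter = CallSettingsAfter;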
@@ -481,125 +481,6 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
   }
 }
 
-// src/error/invalid-argument-error.ts
-var import_provider4 = require("@ai-sdk/provider");
-var name3 = "AI_InvalidArgumentError";
-var marker3 = `vercel.ai.error.${name3}`;
-var symbol3 = Symbol.for(marker3);
-var _a3;
-var InvalidArgumentError = class extends import_provider4.AISDKError {
-  constructor({
-    parameter,
-    value,
-    message
-  }) {
-    super({
-      name: name3,
-      message: `Invalid argument for parameter ${parameter}: ${message}`
-    });
-    this[_a3] = true;
-    this.parameter = parameter;
-    this.value = value;
-  }
-  static isInstance(error) {
-    return import_provider4.AISDKError.hasMarker(error, marker3);
-  }
-};
-_a3 = symbol3;
-
-// core/prompt/prepare-call-settings.ts
-function prepareCallSettings({
-  maxOutputTokens,
-  temperature,
-  topP,
-  topK,
-  presencePenalty,
-  frequencyPenalty,
-  stopSequences,
-  seed
-}) {
-  if (maxOutputTokens != null) {
-    if (!Number.isInteger(maxOutputTokens)) {
-      throw new InvalidArgumentError({
-        parameter: "maxOutputTokens",
-        value: maxOutputTokens,
-        message: "maxOutputTokens must be an integer"
-      });
-    }
-    if (maxOutputTokens < 1) {
-      throw new InvalidArgumentError({
-        parameter: "maxOutputTokens",
-        value: maxOutputTokens,
-        message: "maxOutputTokens must be >= 1"
-      });
-    }
-  }
-  if (temperature != null) {
-    if (typeof temperature !== "number") {
-      throw new InvalidArgumentError({
-        parameter: "temperature",
-        value: temperature,
-        message: "temperature must be a number"
-      });
-    }
-  }
-  if (topP != null) {
-    if (typeof topP !== "number") {
-      throw new InvalidArgumentError({
-        parameter: "topP",
-        value: topP,
-        message: "topP must be a number"
-      });
-    }
-  }
-  if (topK != null) {
-    if (typeof topK !== "number") {
-      throw new InvalidArgumentError({
-        parameter: "topK",
-        value: topK,
-        message: "topK must be a number"
-      });
-    }
-  }
-  if (presencePenalty != null) {
-    if (typeof presencePenalty !== "number") {
-      throw new InvalidArgumentError({
-        parameter: "presencePenalty",
-        value: presencePenalty,
-        message: "presencePenalty must be a number"
-      });
-    }
-  }
-  if (frequencyPenalty != null) {
-    if (typeof frequencyPenalty !== "number") {
-      throw new InvalidArgumentError({
-        parameter: "frequencyPenalty",
-        value: frequencyPenalty,
-        message: "frequencyPenalty must be a number"
-      });
-    }
-  }
-  if (seed != null) {
-    if (!Number.isInteger(seed)) {
-      throw new InvalidArgumentError({
-        parameter: "seed",
-        value: seed,
-        message: "seed must be an integer"
-      });
-    }
-  }
-  return {
-    maxOutputTokens,
-    temperature: temperature != null ? temperature : temperature === null ? void 0 : 0,
-    topP,
-    topK,
-    presencePenalty,
-    frequencyPenalty,
-    stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
-    seed
-  };
-}
-
 // core/prompt/prepare-tools-and-tool-choice.ts
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
 
@@ -653,7 +534,7 @@ function prepareToolsAndToolChoice({
 }
 
 // core/prompt/standardize-prompt.ts
-var import_provider5 = require("@ai-sdk/provider");
+var import_provider4 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
 var import_zod7 = require("zod");
 
@@ -785,19 +666,19 @@ var modelMessageSchema = import_zod6.z.union([
 // core/prompt/standardize-prompt.ts
 async function standardizePrompt(prompt) {
   if (prompt.prompt == null && prompt.messages == null) {
-    throw new import_provider5.InvalidPromptError({
+    throw new import_provider4.InvalidPromptError({
       prompt,
       message: "prompt or messages must be defined"
     });
   }
   if (prompt.prompt != null && prompt.messages != null) {
-    throw new import_provider5.InvalidPromptError({
+    throw new import_provider4.InvalidPromptError({
       prompt,
       message: "prompt and messages cannot be defined at the same time"
     });
   }
   if (prompt.system != null && typeof prompt.system !== "string") {
-    throw new import_provider5.InvalidPromptError({
+    throw new import_provider4.InvalidPromptError({
       prompt,
       message: "system must be a string"
     });
@@ -810,13 +691,13 @@ async function standardizePrompt(prompt) {
   } else if (prompt.messages != null) {
     messages = prompt.messages;
   } else {
-    throw new import_provider5.InvalidPromptError({
+    throw new import_provider4.InvalidPromptError({
      prompt,
      message: "prompt or messages must be defined"
    });
   }
   if (messages.length === 0) {
-    throw new import_provider5.InvalidPromptError({
+    throw new import_provider4.InvalidPromptError({
      prompt,
      message: "messages must not be empty"
    });
@@ -826,7 +707,7 @@ async function standardizePrompt(prompt) {
     schema: import_zod7.z.array(modelMessageSchema)
   });
   if (!validationResult.success) {
-    throw new import_provider5.InvalidPromptError({
+    throw new import_provider4.InvalidPromptError({
       prompt,
       message: "messages must be an array of ModelMessage",
       cause: validationResult.error
@@ -838,6 +719,125 @@ async function standardizePrompt(prompt) {
   };
 }
 
+// src/error/invalid-argument-error.ts
+var import_provider5 = require("@ai-sdk/provider");
+var name3 = "AI_InvalidArgumentError";
+var marker3 = `vercel.ai.error.${name3}`;
+var symbol3 = Symbol.for(marker3);
+var _a3;
+var InvalidArgumentError = class extends import_provider5.AISDKError {
+  constructor({
+    parameter,
+    value,
+    message
+  }) {
+    super({
+      name: name3,
+      message: `Invalid argument for parameter ${parameter}: ${message}`
+    });
+    this[_a3] = true;
+    this.parameter = parameter;
+    this.value = value;
+  }
+  static isInstance(error) {
+    return import_provider5.AISDKError.hasMarker(error, marker3);
+  }
+};
+_a3 = symbol3;
+
+// core/prompt/prepare-call-settings.ts
+function prepareCallSettings({
+  maxOutputTokens,
+  temperature,
+  topP,
+  topK,
+  presencePenalty,
+  frequencyPenalty,
+  seed,
+  stopSequences
+}) {
+  if (maxOutputTokens != null) {
+    if (!Number.isInteger(maxOutputTokens)) {
+      throw new InvalidArgumentError({
+        parameter: "maxOutputTokens",
+        value: maxOutputTokens,
+        message: "maxOutputTokens must be an integer"
+      });
+    }
+    if (maxOutputTokens < 1) {
+      throw new InvalidArgumentError({
+        parameter: "maxOutputTokens",
+        value: maxOutputTokens,
+        message: "maxOutputTokens must be >= 1"
+      });
+    }
+  }
+  if (temperature != null) {
+    if (typeof temperature !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "temperature",
+        value: temperature,
+        message: "temperature must be a number"
+      });
+    }
+  }
+  if (topP != null) {
+    if (typeof topP !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "topP",
+        value: topP,
+        message: "topP must be a number"
+      });
+    }
+  }
+  if (topK != null) {
+    if (typeof topK !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "topK",
+        value: topK,
+        message: "topK must be a number"
+      });
+    }
+  }
+  if (presencePenalty != null) {
+    if (typeof presencePenalty !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "presencePenalty",
+        value: presencePenalty,
+        message: "presencePenalty must be a number"
+      });
+    }
+  }
+  if (frequencyPenalty != null) {
+    if (typeof frequencyPenalty !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "frequencyPenalty",
+        value: frequencyPenalty,
+        message: "frequencyPenalty must be a number"
+      });
+    }
+  }
+  if (seed != null) {
+    if (!Number.isInteger(seed)) {
+      throw new InvalidArgumentError({
+        parameter: "seed",
+        value: seed,
+        message: "seed must be an integer"
+      });
+    }
+  }
+  return {
+    maxOutputTokens,
+    temperature,
+    topP,
+    topK,
+    presencePenalty,
+    frequencyPenalty,
+    stopSequences,
+    seed
+  };
+}
+
 // src/util/retry-with-exponential-backoff.ts
 var import_provider7 = require("@ai-sdk/provider");
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
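The re-added implementation above now passes `temperature` and `stopSequences` through unchanged (previously an omitted temperature became 0 and an empty `stopSequences` array became undefined), while the validation and `AI_InvalidArgumentError` behavior stays the same. A rough sketch of catching that error from a public call, assuming `InvalidArgumentError` remains exported from `ai` as in earlier releases and using a hypothetical provider and model:

import { generateText, InvalidArgumentError } from 'ai'; // export assumed from prior releases
import { openai } from '@ai-sdk/openai'; // assumed provider package, for illustration only

try {
  await generateText({
    model: openai('gpt-4o-mini'), // hypothetical model id
    prompt: 'Hello',
    maxOutputTokens: 0, // rejected: "maxOutputTokens must be >= 1"
  });
} catch (error) {
  if (InvalidArgumentError.isInstance(error)) {
    // error.parameter === 'maxOutputTokens', error.value === 0
    console.error(`Invalid call setting "${error.parameter}":`, error.message);
  } else {
    throw error;
  }
}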