catniff 0.4.0 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/backend.d.ts CHANGED
@@ -1,83 +1,4 @@
- import { Tensor, TensorValue } from "./core";
+ import { Tensor } from "./core";
  export interface Backend {
- sum?(tensor: Tensor, dims?: number[] | number, keepDims?: boolean): Tensor;
- prod?(tensor: Tensor, dims?: number[] | number, keepDims?: boolean): Tensor;
- mean?(tensor: Tensor, dims?: number[] | number, keepDims?: boolean): Tensor;
- max?(tensor: Tensor, dims?: number[] | number, keepDims?: boolean): Tensor;
- min?(tensor: Tensor, dims?: number[] | number, keepDims?: boolean): Tensor;
- softmax?(tensor: Tensor, dims?: number[] | number): Tensor;
- add?(self: Tensor, other: Tensor | TensorValue): Tensor;
- sub?(self: Tensor, other: Tensor | TensorValue): Tensor;
- mul?(self: Tensor, other: Tensor | TensorValue): Tensor;
- pow?(self: Tensor, other: Tensor | TensorValue): Tensor;
- div?(self: Tensor, other: Tensor | TensorValue): Tensor;
- remainder?(self: Tensor, other: Tensor | TensorValue): Tensor;
- ge?(self: Tensor, other: Tensor | TensorValue): Tensor;
- le?(self: Tensor, other: Tensor | TensorValue): Tensor;
- gt?(self: Tensor, other: Tensor | TensorValue): Tensor;
- lt?(self: Tensor, other: Tensor | TensorValue): Tensor;
- eq?(self: Tensor, other: Tensor | TensorValue): Tensor;
- ne?(self: Tensor, other: Tensor | TensorValue): Tensor;
- logicalAnd?(self: Tensor, other: Tensor | TensorValue): Tensor;
- logicalOr?(self: Tensor, other: Tensor | TensorValue): Tensor;
- logicalXor?(self: Tensor, other: Tensor | TensorValue): Tensor;
- logicalNot?(self: Tensor): Tensor;
- bitwiseAnd?(self: Tensor, other: Tensor | TensorValue): Tensor;
- bitwiseOr?(self: Tensor, other: Tensor | TensorValue): Tensor;
- bitwiseXor?(self: Tensor, other: Tensor | TensorValue): Tensor;
- bitwiseNot?(self: Tensor): Tensor;
- bitwiseLeftShift?(self: Tensor, other: Tensor | TensorValue): Tensor;
- bitwiseRightShift?(self: Tensor, other: Tensor | TensorValue): Tensor;
- neg?(self: Tensor): Tensor;
- reciprocal?(self: Tensor): Tensor;
- square?(self: Tensor): Tensor;
- abs?(self: Tensor): Tensor;
- sign?(self: Tensor): Tensor;
- sin?(self: Tensor): Tensor;
- cos?(self: Tensor): Tensor;
- tan?(self: Tensor): Tensor;
- asin?(self: Tensor): Tensor;
- acos?(self: Tensor): Tensor;
- atan?(self: Tensor): Tensor;
- atan2?(self: Tensor): Tensor;
- sinh?(self: Tensor): Tensor;
- cosh?(self: Tensor): Tensor;
- asinh?(self: Tensor): Tensor;
- acosh?(self: Tensor): Tensor;
- atanh?(self: Tensor): Tensor;
- deg2rad?(self: Tensor): Tensor;
- rad2deg?(self: Tensor): Tensor;
- sqrt?(self: Tensor): Tensor;
- rsqrt?(self: Tensor): Tensor;
- exp?(self: Tensor): Tensor;
- exp2?(self: Tensor): Tensor;
- expm1?(self: Tensor): Tensor;
- log?(self: Tensor): Tensor;
- log2?(self: Tensor): Tensor;
- log10?(self: Tensor): Tensor;
- log1p?(self: Tensor): Tensor;
- relu?(self: Tensor): Tensor;
- sigmoid?(self: Tensor): Tensor;
- tanh?(self: Tensor): Tensor;
- softplus?(self: Tensor): Tensor;
- softsign?(self: Tensor): Tensor;
- silu?(self: Tensor): Tensor;
- mish?(self: Tensor): Tensor;
- maximum?(self: Tensor, other: Tensor | TensorValue): Tensor;
- minimum?(self: Tensor, other: Tensor | TensorValue): Tensor;
- round?(self: Tensor): Tensor;
- floor?(self: Tensor): Tensor;
- ceil?(self: Tensor): Tensor;
- trunc?(self: Tensor): Tensor;
- frac?(self: Tensor): Tensor;
- clip?(self: Tensor, min: number, max: number): Tensor;
- erf?(self: Tensor): Tensor;
- erfc?(self: Tensor): Tensor;
- erfinv?(self: Tensor): Tensor;
- dot?(self: Tensor, other: Tensor | TensorValue): Tensor;
- mm?(self: Tensor, other: Tensor | TensorValue): Tensor;
- bmm?(self: Tensor, other: Tensor | TensorValue): Tensor;
- mv?(self: Tensor, other: Tensor | TensorValue): Tensor;
- matmul?(self: Tensor, other: Tensor | TensorValue): Tensor;
- to?(tensor: Tensor): Tensor;
+ transfer(tensor: Tensor): Tensor;
  }
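
Note on the change above: the Backend interface drops its roughly eighty optional per-op hooks and keeps a single required transfer method (replacing the optional to). A minimal sketch of what a 0.4.1 backend could look like, assuming Backend is importable from the package and that backends are registered on the static Tensor.backends map that core.js reads (the map lookup is visible in the diff below; the set() call and the "webgpu" device name are illustrative assumptions, not documented API):

import { Tensor } from "catniff";
import type { Backend } from "catniff";

// The only hook left: to(device) calls transfer() to move a tensor onto this device.
const webgpuBackend: Backend = {
    transfer(tensor: Tensor): Tensor {
        // ... copy tensor.value into device memory and return a tensor tagged
        // with this device (details are backend-specific) ...
        return tensor;
    },
};

// Assumed registration, mirroring Tensor.backends.get(device) in core.js:
Tensor.backends.set("webgpu", webgpuBackend);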
package/dist/core.js CHANGED
@@ -337,11 +337,6 @@ class Tensor {
  }
  // Tensor sum reduction
  sum(dims, keepDims = false) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sum) {
- return backend.sum(this, dims, keepDims);
- }
  if (typeof this.value === "number")
  return this;
  if (typeof dims === "number") {
@@ -395,11 +390,6 @@ class Tensor {
  }
  // Tensor product reduction
  prod(dims, keepDims = false) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.prod) {
- return backend.prod(this, dims, keepDims);
- }
  if (typeof this.value === "number")
  return this;
  if (typeof dims === "number") {
@@ -451,11 +441,6 @@ class Tensor {
  }
  // Tensor mean reduction
  mean(dims, keepDims = false) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.mean) {
- return backend.mean(this, dims, keepDims);
- }
  if (typeof this.value === "number")
  return this;
  if (typeof dims === "number") {
@@ -514,11 +499,6 @@ class Tensor {
  }
  // Tensor maximum reduction
  max(dims, keepDims = false) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.max) {
- return backend.max(this, dims, keepDims);
- }
  if (typeof this.value === "number")
  return this;
  if (typeof dims === "number") {
@@ -572,11 +552,6 @@ class Tensor {
  }
  // Tensor minimum reduction
  min(dims, keepDims = false) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.min) {
- return backend.min(this, dims, keepDims);
- }
  if (typeof this.value === "number")
  return this;
  if (typeof dims === "number") {
@@ -630,11 +605,6 @@ class Tensor {
  }
  // Tensor product reduction
  softmax(dims) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.softmax) {
- return backend.softmax(this, dims);
- }
  if (typeof this.value === "number")
  return this;
  if (typeof dims === "number") {
@@ -699,488 +669,228 @@ class Tensor {
  }
  // Tensor element-wise addition
  add(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.add) {
- return backend.add(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a + b, (self, other, outGrad) => outGrad, (self, other, outGrad) => outGrad);
  }
  // Tensor element-wise subtraction
  sub(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sub) {
- return backend.sub(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a - b, (self, other, outGrad) => outGrad, (self, other, outGrad) => outGrad.neg());
  }
  subtract = this.sub;
  // Tensor element-wise multiplication
  mul(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.mul) {
- return backend.mul(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a * b, (self, other, outGrad) => outGrad.mul(other), (self, other, outGrad) => outGrad.mul(self));
  }
  multiply = this.mul;
  // Tensor element-wise power
  pow(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.pow) {
- return backend.pow(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a ** b, (self, other, outGrad) => outGrad.mul(other.mul(self.pow(other.sub(1)))), (self, other, outGrad) => outGrad.mul(self.pow(other).mul(self.log())));
  }
  // Tensor element-wise division
  div(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.div) {
- return backend.div(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a / b, (self, other, outGrad) => outGrad.div(other), (self, other, outGrad) => outGrad.mul(self.neg().div(other.square())));
  }
  divide = this.div;
  // Tensor element-wise modulo
  remainder(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.remainder) {
- return backend.remainder(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a % b);
  }
  // Tensor element-wise greater or equal comparison
  ge(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.ge) {
- return backend.ge(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a >= b ? 1 : 0);
  }
  greaterEqual = this.ge;
  // Tensor element-wise less or equal comparison
  le(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.le) {
- return backend.le(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a <= b ? 1 : 0);
  }
  lessEqual = this.le;
  // Tensor element-wise greater-than comparison
  gt(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.gt) {
- return backend.gt(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a > b ? 1 : 0);
  }
  greater = this.gt;
  // Tensor element-wise less-than comparison
  lt(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.lt) {
- return backend.lt(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a < b ? 1 : 0);
  }
  less = this.lt;
  // Tensor element-wise equality comparison
  eq(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.eq) {
- return backend.eq(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a === b ? 1 : 0);
  }
  equal = this.eq;
  // Tensor element-wise not equality comparison
  ne(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.ne) {
- return backend.ne(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a !== b ? 1 : 0);
  }
  notEqual = this.ne;
  // Tensor element-wise logical and
  logicalAnd(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.logicalAnd) {
- return backend.logicalAnd(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a === 1 && b === 1 ? 1 : 0);
  }
  // Tensor element-wise logical or
  logicalOr(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.logicalOr) {
- return backend.logicalOr(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a === 1 || b === 1 ? 1 : 0);
  }
  // Tensor element-wise logical xor
  logicalXor(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.logicalXor) {
- return backend.logicalXor(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => (a === 1 || b === 1) && a !== b ? 1 : 0);
  }
  // Tensor element-wise logical not
  logicalNot() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.logicalNot) {
- return backend.logicalNot(this);
- }
  return this.elementWiseSelfDAG((a) => a === 1 ? 0 : 1);
  }
  // Tensor element-wise bitwise and
  bitwiseAnd(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bitwiseAnd) {
- return backend.bitwiseAnd(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a & b);
  }
  // Tensor element-wise bitwise or
  bitwiseOr(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bitwiseOr) {
- return backend.bitwiseOr(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a | b);
  }
  // Tensor element-wise bitwise xor
  bitwiseXor(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bitwiseXor) {
- return backend.bitwiseXor(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a ^ b);
  }
  // Tensor element-wise bitwise not
  bitwiseNot() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bitwiseNot) {
- return backend.bitwiseNot(this);
- }
  return this.elementWiseSelfDAG((a) => ~a);
  }
  // Tensor element-wise left shift
  bitwiseLeftShift(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bitwiseLeftShift) {
- return backend.bitwiseLeftShift(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a << b);
  }
  // Tensor element-wise right shift
  bitwiseRightShift(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bitwiseRightShift) {
- return backend.bitwiseRightShift(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => a >> b);
  }
  // Tensor element-wise negation
  neg() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.neg) {
- return backend.neg(this);
- }
  return this.elementWiseSelfDAG((a) => -a, (self, outGrad) => outGrad.mul(-1));
  }
  negative = this.neg;
  // Tensor element-wise reciprocal
  reciprocal() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.reciprocal) {
- return backend.reciprocal(this);
- }
  return this.elementWiseSelfDAG((a) => 1 / a, (self, outGrad) => outGrad.mul(self.pow(-2).neg()));
  }
  // Tensor element-wise square
  square() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.square) {
- return backend.square(this);
- }
  return this.elementWiseSelfDAG((a) => a * a, (self, outGrad) => outGrad.mul(self.mul(2)));
  }
  // Tensor element-wise absolute
  abs() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.abs) {
- return backend.abs(this);
- }
  return this.elementWiseSelfDAG((a) => Math.abs(a), (self, outGrad) => outGrad.mul(self.sign()));
  }
  absolute = this.abs;
  // Tensor element-wise sign function
  sign() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sign) {
- return backend.sign(this);
- }
  return this.elementWiseSelfDAG((a) => Math.sign(a));
  }
  // Tensor element-wise sin
  sin() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sin) {
- return backend.sin(this);
- }
  return this.elementWiseSelfDAG((a) => Math.sin(a), (self, outGrad) => outGrad.mul(self.cos()));
  }
  // Tensor element-wise cos
  cos() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.cos) {
- return backend.cos(this);
- }
  return this.elementWiseSelfDAG((a) => Math.cos(a), (self, outGrad) => outGrad.mul(self.sin().neg()));
  }
  // Tensor element-wise tan
  tan() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.tan) {
- return backend.tan(this);
- }
  return this.elementWiseSelfDAG((a) => Math.tan(a), (self, outGrad) => outGrad.mul(self.tan().square().add(1)));
  }
  // Tensor element-wise asin
  asin() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.asin) {
- return backend.asin(this);
- }
  return this.elementWiseSelfDAG((a) => Math.asin(a), (self, outGrad) => outGrad.div(self.square().neg().add(1).sqrt()));
  }
  arcsin = this.asin;
  // Tensor element-wise acos
  acos() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.acos) {
- return backend.acos(this);
- }
  return this.elementWiseSelfDAG((a) => Math.acos(a), (self, outGrad) => outGrad.div(self.square().neg().add(1).sqrt()).neg());
  }
  arccos = this.acos;
  // Tensor element-wise atan
  atan() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.atan) {
- return backend.atan(this);
- }
  return this.elementWiseSelfDAG((a) => Math.atan(a), (self, outGrad) => outGrad.div(self.square().add(1)));
  }
  arctan = this.atan;
  // Tensor element-wise atan2
  atan2(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.atan2) {
- return backend.atan2(this);
- }
  return this.elementWiseABDAG(other, (a, b) => Math.atan2(a, b), (self, other, outGrad) => outGrad.mul(other.div(self.square().add(other.square()))), (self, other, outGrad) => outGrad.mul(self.neg().div(self.square().add(other.square()))));
  }
  arctan2 = this.atan2;
  // Tensor element-wise sinh
  sinh() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sinh) {
- return backend.sinh(this);
- }
  return this.elementWiseSelfDAG((a) => Math.sinh(a), (self, outGrad) => outGrad.mul(self.cosh()));
  }
  // Tensor element-wise cosh
  cosh() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.cosh) {
- return backend.cosh(this);
- }
  return this.elementWiseSelfDAG((a) => Math.cosh(a), (self, outGrad) => outGrad.mul(self.sinh()));
  }
  // Tensor element-wise asinh
  asinh() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.asinh) {
- return backend.asinh(this);
- }
  return this.elementWiseSelfDAG((a) => Math.asinh(a), (self, outGrad) => outGrad.div(self.square().add(1).sqrt()));
  }
  arcsinh = this.asinh;
  // Tensor element-wise acosh
  acosh() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.acosh) {
- return backend.acosh(this);
- }
  return this.elementWiseSelfDAG((a) => Math.acosh(a), (self, outGrad) => outGrad.div(self.add(1).sqrt().mul(self.sub(1).sqrt())));
  }
  arccosh = this.acosh;
  // Tensor element-wise atanh
  atanh() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.atanh) {
- return backend.atanh(this);
- }
  return this.elementWiseSelfDAG((a) => Math.atanh(a), (self, outGrad) => outGrad.div(self.square().neg().add(1)));
  }
  arctanh = this.atanh;
  // Tensor element-wise degree to radian
  deg2rad() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.deg2rad) {
- return backend.deg2rad(this);
- }
  return this.elementWiseSelfDAG((a) => a * (Math.PI / 180), (self, outGrad) => outGrad.mul(Math.PI / 180));
  }
  // Tensor element-wise radian to degree
  rad2deg() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.rad2deg) {
- return backend.rad2deg(this);
- }
  return this.elementWiseSelfDAG((a) => a / (Math.PI / 180), (self, outGrad) => outGrad.div(Math.PI / 180));
  }
  // Tensor element-wise square root
  sqrt() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sqrt) {
- return backend.sqrt(this);
- }
  return this.elementWiseSelfDAG((a) => Math.sqrt(a), (self, outGrad) => outGrad.div(self.sqrt().mul(2)));
  }
  // Tensor element-wise reciprocal of square root
  rsqrt() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.rsqrt) {
- return backend.rsqrt(this);
- }
  return this.elementWiseSelfDAG((a) => 1 / Math.sqrt(a), (self, outGrad) => outGrad.mul(self.pow(-1.5).mul(-0.5)));
  }
  // Tensor element-wise e^x
  exp() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.exp) {
- return backend.exp(this);
- }
  return this.elementWiseSelfDAG((a) => Math.exp(a), (self, outGrad) => outGrad.mul(self.exp()));
  }
  // Tensor element-wise 2^x
  exp2() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.exp2) {
- return backend.exp2(this);
- }
  return this.elementWiseSelfDAG((a) => 2 ** a, (self, outGrad) => outGrad.mul(self.exp2().mul(Math.log(2))));
  }
  // Tensor element-wise e^x - 1
  expm1() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.expm1) {
- return backend.expm1(this);
- }
  return this.elementWiseSelfDAG((a) => Math.expm1(a), (self, outGrad) => outGrad.mul(self.exp()));
  }
  // Tensor element-wise natural log
  log() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.log) {
- return backend.log(this);
- }
  return this.elementWiseSelfDAG((a) => Math.log(a), (self, outGrad) => outGrad.div(self));
  }
  // Tensor element-wise log2
  log2() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.log2) {
- return backend.log2(this);
- }
  return this.elementWiseSelfDAG((a) => Math.log2(a), (self, outGrad) => outGrad.div(self.mul(Math.log(2))));
  }
  // Tensor element-wise log10
  log10() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.log10) {
- return backend.log10(this);
- }
  return this.elementWiseSelfDAG((a) => Math.log10(a), (self, outGrad) => outGrad.div(self.mul(Math.log(10))));
  }
  // Tensor element-wise log(1+x)
  log1p() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.log1p) {
- return backend.log1p(this);
- }
  return this.elementWiseSelfDAG((a) => Math.log1p(a), (self, outGrad) => outGrad.div(self.add(1)));
  }
  // Tensor element-wise relu
  relu() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.relu) {
- return backend.relu(this);
- }
  return this.elementWiseSelfDAG((a) => Math.max(a, 0), (self, outGrad) => outGrad.mul(self.gt(0)));
  }
  // Tensor element-wise sigmoid
  sigmoid() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.sigmoid) {
- return backend.sigmoid(this);
- }
  return this.elementWiseSelfDAG((a) => 1 / (1 + Math.exp(-a)), (self, outGrad) => {
  const sig = self.sigmoid();
  return outGrad.mul(sig).mul(sig.neg().add(1));
@@ -1188,38 +898,18 @@ class Tensor {
  }
  // Tensor element-wise tanh
  tanh() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.tanh) {
- return backend.tanh(this);
- }
  return this.elementWiseSelfDAG((a) => Math.tanh(a), (self, outGrad) => outGrad.mul(self.tanh().square().neg().add(1)));
  }
  // Tensor element-wise softplus
  softplus() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.softplus) {
- return backend.softplus(this);
- }
  return this.elementWiseSelfDAG((a) => Math.log1p(Math.exp(a)), (self, outGrad) => outGrad.mul(self.sigmoid()));
  }
  // Tensor element-wise softsign
  softsign() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.softsign) {
- return backend.softsign(this);
- }
  return this.elementWiseSelfDAG((a) => a / (1 + Math.abs(a)), (self, outGrad) => outGrad.div(self.abs().add(1).square()));
  }
  // Tensor element-wise silu (swish)
  silu() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.silu) {
- return backend.silu(this);
- }
  return this.elementWiseSelfDAG((a) => a / (1 + Math.exp(-a)), (self, outGrad) => {
  const sig = self.sigmoid();
  return outGrad.mul(sig.add(self.mul(sig).mul(sig.neg().add(1))));
@@ -1227,11 +917,6 @@ class Tensor {
  }
  // Tensor element-wise mish
  mish() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.mish) {
- return backend.mish(this);
- }
  return this.elementWiseSelfDAG((a) => a * Math.tanh(Math.log1p(Math.exp(a))), (self, outGrad) => {
  const tanhSoftPlus = self.exp().add(1).log().tanh();
  // tanh(softplus(x)) + x * (1 - tanh²(softplus(x))) * sigmoid(x)
@@ -1241,103 +926,48 @@ class Tensor {
  }
  // Tensor element-wise maximum
  maximum(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.maximum) {
- return backend.maximum(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => Math.max(a, b), (self, other, outGrad) => outGrad.mul(self.gt(other).add(self.eq(other).mul(0.5))), (self, other, outGrad) => outGrad.mul(other.gt(self).add(other.eq(self).mul(0.5))));
  }
  // Tensor element-wise minimum
  minimum(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.minimum) {
- return backend.minimum(this, other);
- }
  return this.elementWiseABDAG(other, (a, b) => Math.min(a, b), (self, other, outGrad) => outGrad.mul(self.lt(other).add(self.eq(other).mul(0.5))), (self, other, outGrad) => outGrad.mul(other.lt(self).add(other.eq(self).mul(0.5))));
  }
  // Tensor element-wise round
  round() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.round) {
- return backend.round(this);
- }
  return this.elementWiseSelfDAG((a) => Math.round(a));
  }
  // Tensor element-wise floor
  floor() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.floor) {
- return backend.floor(this);
- }
  return this.elementWiseSelfDAG((a) => Math.floor(a));
  }
  // Tensor element-wise ceil
  ceil() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.ceil) {
- return backend.ceil(this);
- }
  return this.elementWiseSelfDAG((a) => Math.ceil(a));
  }
  // Tensor element-wise truncation
  trunc() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.trunc) {
- return backend.trunc(this);
- }
  return this.elementWiseSelfDAG((a) => Math.trunc(a));
  }
  fix = this.trunc;
  // Tensor element-wise fraction portion
  frac() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.frac) {
- return backend.frac(this);
- }
  return this.elementWiseSelfDAG((a) => a - Math.floor(a));
  }
  // Tensor element-wise clip and clamp
  clip(min, max) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.clip) {
- return backend.clip(this, min, max);
- }
  return this.elementWiseSelfDAG((a) => Math.max(min, Math.min(max, a)), (self, outGrad) => outGrad.mul(self.ge(min).mul(self.le(max))));
  }
  clamp = this.clip;
  // Tensor element-wise error function
  erf() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.erf) {
- return backend.erf(this);
- }
  return this.elementWiseSelfDAG((a) => (0, utils_1.erf)(a), (self, outGrad) => outGrad.mul(self.square().neg().exp().mul(2 / Math.sqrt(Math.PI))));
  }
  // Tensor element-wise complementary error function
  erfc() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.erfc) {
- return backend.erfc(this);
- }
  return this.elementWiseSelfDAG((a) => (0, utils_1.erfc)(a), (self, outGrad) => outGrad.mul(self.square().neg().exp().mul(2 / Math.sqrt(Math.PI)).neg()));
  }
  // Tensor element-wise inverse error function
  erfinv() {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.erfinv) {
- return backend.erfinv(this);
- }
  return this.elementWiseSelfDAG((a) => (0, utils_1.erfinv)(a), (self, outGrad) => outGrad.mul(self.erfinv().square().exp().mul(Math.sqrt(Math.PI) / 2)));
  }
  // Transpose
@@ -1379,11 +1009,6 @@ class Tensor {
  }
  // 1D tensor dot product
  dot(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.dot) {
- return backend.dot(this, other);
- }
  other = Tensor.forceTensor(other);
  // Verify 1D shape
  if (this.shape.length !== 1 || other.shape.length !== 1) {
@@ -1422,11 +1047,6 @@ class Tensor {
  }
  // Matrix multiplication
  mm(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.mm) {
- return backend.mm(this, other);
- }
  other = Tensor.forceTensor(other);
  // Verify 2D shape
  if (this.shape.length !== 2 || other.shape.length !== 2) {
@@ -1482,11 +1102,6 @@ class Tensor {
  }
  // Batched 3D tensor matmul
  bmm(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.bmm) {
- return backend.bmm(this, other);
- }
  other = Tensor.forceTensor(other);
  // Verify 3D shape
  if (this.shape.length !== 3 || other.shape.length !== 3 || this.shape[0] !== other.shape[0]) {
@@ -1545,11 +1160,6 @@ class Tensor {
  }
  // Convert right-side 1D tensor to a vector (nx1 tensor) to do matmul
  mv(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.mv) {
- return backend.mv(this, other);
- }
  other = Tensor.forceTensor(other);
  // Verify 2D shape
  if (this.shape.length !== 2 || other.shape.length !== 1) {
@@ -1559,11 +1169,6 @@ class Tensor {
  }
  // General matrix multiplication with different shapes
  matmul(other) {
- // Use backend of tensor's device if available, or else fallback to cpu
- const backend = Tensor.backends.get(this.device);
- if (backend && backend.matmul) {
- return backend.matmul(this, other);
- }
  other = Tensor.forceTensor(other);
  const isThis1D = this.shape.length === 1;
  const isOther1D = other.shape.length === 1;
@@ -1875,10 +1480,10 @@ class Tensor {
  // Op to transfer tensor to another device
  to(device) {
  const backend = Tensor.backends.get(device);
- if (backend && backend.to) {
- return backend.to(this);
+ if (backend && backend.transfer) {
+ return backend.transfer(this);
  }
- throw new Error(`No device found to transfer tensor to or "to" is not implemented for device.`);
+ throw new Error(`No device found to transfer tensor to or a handler is not implemented for device.`);
  }
  }
  exports.Tensor = Tensor;
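
Net effect of the core.js changes: every op now runs the built-in implementation unconditionally (the per-op Tensor.backends.get(this.device) lookup is gone), and to(device) is the single remaining backend entry point, dispatching to backend.transfer instead of backend.to. A usage sketch under the same assumptions as the backend example above (hypothetical "webgpu" backend; the array-literal constructor mirrors the package's Torch-like API but is not shown in this diff):

const t = new Tensor([1, 2, 3]);

// Always the built-in CPU path in 0.4.1 — no per-op backend lookup:
const y = t.add(1).relu();

// The only backend-dispatched op left; throws unless a backend registered
// under "webgpu" implements transfer():
const g = y.to("webgpu");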
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "catniff",
- "version": "0.4.0",
+ "version": "0.4.1",
  "description": "A small Torch-like deep learning framework for Javascript with tensor and autograd support",
  "main": "index.js",
  "scripts": {