catniff 0.2.8 → 0.2.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/core.d.ts CHANGED
@@ -26,7 +26,7 @@ export declare class Tensor {
      readonly number[]
  ];
  static broadcastShapes(shapeA: readonly number[], shapeB: readonly number[]): readonly number[];
- static indexToCoords(index: number, shape: readonly number[], strides: readonly number[]): number[];
+ static indexToCoords(index: number, strides: readonly number[]): number[];
  static coordsToUnbroadcastedIndex(coords: number[], shape: readonly number[], strides: readonly number[]): number;
  static coordsToIndex(coords: number[], strides: readonly number[]): number;
  static shapeToSize(shape: readonly number[]): number;
@@ -43,14 +43,25 @@ export declare class Tensor {
  mean(dims?: number[] | number, keepDims?: boolean): Tensor;
  add(other: TensorValue | Tensor): Tensor;
  sub(other: TensorValue | Tensor): Tensor;
+ subtract: (other: TensorValue | Tensor) => Tensor;
  mul(other: TensorValue | Tensor): Tensor;
+ multiply: (other: TensorValue | Tensor) => Tensor;
  pow(other: TensorValue | Tensor): Tensor;
  div(other: TensorValue | Tensor): Tensor;
+ divide: (other: TensorValue | Tensor) => Tensor;
+ remainder(other: TensorValue | Tensor): Tensor;
  ge(other: TensorValue | Tensor): Tensor;
+ greaterEqual: (other: TensorValue | Tensor) => Tensor;
  le(other: TensorValue | Tensor): Tensor;
+ lessEqual: (other: TensorValue | Tensor) => Tensor;
  gt(other: TensorValue | Tensor): Tensor;
+ greater: (other: TensorValue | Tensor) => Tensor;
  lt(other: TensorValue | Tensor): Tensor;
+ less: (other: TensorValue | Tensor) => Tensor;
  eq(other: TensorValue | Tensor): Tensor;
+ equal: (other: TensorValue | Tensor) => Tensor;
+ ne(other: TensorValue | Tensor): Tensor;
+ notEqual: (other: TensorValue | Tensor) => Tensor;
  logicalAnd(other: TensorValue | Tensor): Tensor;
  logicalOr(other: TensorValue | Tensor): Tensor;
  logicalXor(other: TensorValue | Tensor): Tensor;
@@ -62,21 +73,38 @@ export declare class Tensor {
  bitwiseLeftShift(other: TensorValue | Tensor): Tensor;
  bitwiseRightShift(other: TensorValue | Tensor): Tensor;
  neg(): Tensor;
+ negative: () => Tensor;
+ reciprocal(): Tensor;
+ square(): Tensor;
  abs(): Tensor;
+ absolute: () => Tensor;
  sign(): Tensor;
  sin(): Tensor;
  cos(): Tensor;
  tan(): Tensor;
  asin(): Tensor;
+ arcsin: () => Tensor;
  acos(): Tensor;
+ arccos: () => Tensor;
  atan(): Tensor;
+ arctan: () => Tensor;
+ atan2(other: TensorValue | Tensor): Tensor;
+ arctan2: (other: TensorValue | Tensor) => Tensor;
  sinh(): Tensor;
  cosh(): Tensor;
  asinh(): Tensor;
+ arcsinh: () => Tensor;
  acosh(): Tensor;
+ arccosh: () => Tensor;
  atanh(): Tensor;
+ arctanh: () => Tensor;
+ deg2rad(): Tensor;
+ rad2deg(): Tensor;
  sqrt(): Tensor;
+ rsqrt(): Tensor;
  exp(): Tensor;
+ exp2(): Tensor;
+ expm1(): Tensor;
  log(): Tensor;
  log2(): Tensor;
  log10(): Tensor;
@@ -84,7 +112,17 @@ export declare class Tensor {
  relu(): Tensor;
  sigmoid(): Tensor;
  tanh(): Tensor;
+ round(): Tensor;
+ floor(): Tensor;
+ ceil(): Tensor;
+ trunc(): Tensor;
+ fix: () => Tensor;
+ frac(): Tensor;
+ clip(min: number, max: number): Tensor;
+ clamp: (min: number, max: number) => Tensor;
  transpose(dim1: number, dim2: number): Tensor;
+ swapaxes: (dim1: number, dim2: number) => Tensor;
+ swapdims: (dim1: number, dim2: number) => Tensor;
  t(): Tensor;
  dot(other: TensorValue | Tensor): Tensor;
  mm(other: TensorValue | Tensor): Tensor;
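
For context, a minimal usage sketch of the methods and aliases added above. This is illustrative only: it assumes Tensor is re-exported from the package root and that the constructor accepts a plain nested array, neither of which this diff itself confirms.

    import { Tensor } from "catniff";

    const x = new Tensor([[1, -2], [3, 4]]);

    x.square();        // element-wise x * x
    x.clip(0, 2);      // clamp() is an alias
    x.ne(0);           // notEqual() is an alias
    x.atan2(2);        // arctan2() is an alias
    x.deg2rad();       // degrees to radians
    x.transpose(0, 1); // swapaxes()/swapdims() are aliases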
package/dist/core.js CHANGED
@@ -94,12 +94,10 @@ class Tensor {
  return newShape;
  }
  // Utility to convert flat index to array of coordinates
- static indexToCoords(index, shape, strides) {
- const coords = new Array(shape.length);
+ static indexToCoords(index, strides) {
+ const coords = new Array(strides.length);
  let remaining = index;
- // Sort dimensions by stride (largest first) for correct decomposition
- const sortedDims = shape.map((_, i) => i).sort((a, b) => strides[b] - strides[a]);
- for (const dim of sortedDims) {
+ for (let dim = 0; dim < strides.length; dim++) {
  coords[dim] = Math.floor(remaining / strides[dim]);
  remaining %= strides[dim];
  }
@@ -152,7 +150,7 @@ class Tensor {
  const outputValue = new Array(outputSize);
  for (let i = 0; i < outputSize; i++) {
  // Get coordinates from 1D index
- const coordsOutput = Tensor.indexToCoords(i, outputShape, outputStrides);
+ const coordsOutput = Tensor.indexToCoords(i, outputStrides);
  // Convert the coordinates to 1D index of flattened A with respect to A's shape
  const indexA = Tensor.coordsToUnbroadcastedIndex(coordsOutput, paddedAShape, paddedAStrides);
  // Convert the coordinates to 1D index of flattened B with respect to B's shape
@@ -267,14 +265,19 @@ class Tensor {
  }
  }
  // Remove size-1 dims only
- const outShape = this.shape.filter((dim, i) => {
- const shouldSqueeze = dims.includes(i);
- if (shouldSqueeze && dim !== 1)
- throw new Error(`Can not squeeze dim with size ${dim}`);
- return !shouldSqueeze;
- });
- // Remove strides of size-1 dims
- const outStrides = this.strides.filter((stride, i) => !dims.includes(i));
+ const outShape = [], outStrides = [];
+ for (let index = 0; index < this.shape.length; index++) {
+ const dim = this.shape[index];
+ const stride = this.strides[index];
+ if (dims.includes(index)) {
+ if (dim !== 1)
+ throw new Error(`Can not squeeze dim with size ${dim}`);
+ }
+ else {
+ outShape.push(dim);
+ outStrides.push(stride);
+ }
+ }
  const outValue = outShape.length === 0 ? this.value[0] : this.value;
  const out = new Tensor(outValue, {
  shape: outShape,
@@ -353,7 +356,7 @@ class Tensor {
  }
  // Calculate new value after sum
  for (let index = 0; index < originalSize; index++) {
- const coords = Tensor.indexToCoords(index, this.shape, this.strides);
+ const coords = Tensor.indexToCoords(index, this.strides);
  // Force 0 on reduced axes to collapse into size-1 dims
  const outCoords = coords.map((val, i) => dims.includes(i) ? 0 : val);
  // Convert output coordinates to flat index
@@ -407,7 +410,7 @@ class Tensor {
  }
  // Calculate new value after multiplying
  for (let index = 0; index < originalSize; index++) {
- const coords = Tensor.indexToCoords(index, this.shape, this.strides);
+ const coords = Tensor.indexToCoords(index, this.strides);
  // Force 0 on reduced axes to collapse into size-1 dims
  const outCoords = coords.map((val, i) => dims.includes(i) ? 0 : val);
  // Convert output coordinates to flat index
@@ -424,7 +427,7 @@ class Tensor {
  if (this.requiresGrad) {
  // Grad is the product of other elements of the same axis, which is product of all els divided by the current value
  for (let index = 0; index < originalSize; index++) {
- const coords = Tensor.indexToCoords(index, this.shape, this.strides);
+ const coords = Tensor.indexToCoords(index, this.strides);
  // Force 0 on reduced axes to collapse into size-1 dims
  const outCoords = coords.map((val, i) => dims.includes(i) ? 0 : val);
  // Convert output coordinates to flat index
@@ -470,7 +473,7 @@ class Tensor {
  }
  // Calculate sums and how many elements contribute to specific positions
  for (let index = 0; index < originalSize; index++) {
- const coords = Tensor.indexToCoords(index, this.shape, this.strides);
+ const coords = Tensor.indexToCoords(index, this.strides);
  // Force 0 on reduced axes to collapse into size-1 dims
  const outCoords = coords.map((val, i) => dims.includes(i) ? 0 : val);
  // Convert output coordinates to flat index
@@ -492,7 +495,7 @@ class Tensor {
  if (this.requiresGrad) {
  // Calculate grad by assiging 1 divide by the number of contributors to the position
  for (let index = 0; index < originalSize; index++) {
- const coords = Tensor.indexToCoords(index, this.shape, this.strides);
+ const coords = Tensor.indexToCoords(index, this.strides);
  // Force 0 on reduced axes to collapse into size-1 dims
  const outCoords = coords.map((val, i) => dims.includes(i) ? 0 : val);
  // Convert output coordinates to flat index
@@ -519,10 +522,12 @@ class Tensor {
  sub(other) {
  return this.elementWiseABDAG(other, (a, b) => a - b, (self, other, outGrad) => outGrad, (self, other, outGrad) => outGrad.neg());
  }
+ subtract = this.sub;
  // Tensor element-wise multiplication
  mul(other) {
  return this.elementWiseABDAG(other, (a, b) => a * b, (self, other, outGrad) => outGrad.mul(other), (self, other, outGrad) => outGrad.mul(self));
  }
+ multiply = this.mul;
  // Tensor element-wise power
  pow(other) {
  return this.elementWiseABDAG(other, (a, b) => a ** b, (self, other, outGrad) => outGrad.mul(other.mul(self.pow(other.sub(1)))), (self, other, outGrad) => outGrad.mul(self.pow(other).mul(self.log())));
@@ -531,26 +536,41 @@ class Tensor {
  div(other) {
  return this.elementWiseABDAG(other, (a, b) => a / b, (self, other, outGrad) => outGrad.div(other), (self, other, outGrad) => outGrad.mul(self.neg().div(other.pow(2))));
  }
+ divide = this.div;
+ // Tensor element-wise modulo
+ remainder(other) {
+ return this.elementWiseABDAG(other, (a, b) => a % b);
+ }
  // Tensor element-wise greater or equal comparison
  ge(other) {
  return this.elementWiseABDAG(other, (a, b) => a >= b ? 1 : 0);
  }
+ greaterEqual = this.ge;
  // Tensor element-wise less or equal comparison
  le(other) {
  return this.elementWiseABDAG(other, (a, b) => a <= b ? 1 : 0);
  }
+ lessEqual = this.le;
  // Tensor element-wise greater-than comparison
  gt(other) {
  return this.elementWiseABDAG(other, (a, b) => a > b ? 1 : 0);
  }
+ greater = this.gt;
  // Tensor element-wise less-than comparison
  lt(other) {
  return this.elementWiseABDAG(other, (a, b) => a < b ? 1 : 0);
  }
+ less = this.lt;
  // Tensor element-wise equality comparison
  eq(other) {
  return this.elementWiseABDAG(other, (a, b) => a === b ? 1 : 0);
  }
+ equal = this.eq;
+ // Tensor element-wise not equality comparison
+ ne(other) {
+ return this.elementWiseABDAG(other, (a, b) => a !== b ? 1 : 0);
+ }
+ notEqual = this.ne;
  // Tensor element-wise logical and
  logicalAnd(other) {
  return this.elementWiseABDAG(other, (a, b) => a === 1 && b === 1 ? 1 : 0);
@@ -595,10 +615,20 @@ class Tensor {
  neg() {
  return this.elementWiseSelfDAG((a) => -a, (self, outGrad) => outGrad.mul(-1));
  }
+ negative = this.neg;
+ // Tensor element-wise reciprocal
+ reciprocal() {
+ return this.elementWiseSelfDAG((a) => 1 / a, (self, outGrad) => outGrad.mul(self.neg().pow(-2)));
+ }
+ // Tensor element-wise square
+ square() {
+ return this.elementWiseSelfDAG((a) => a * a, (self, outGrad) => outGrad.mul(self.mul(2)));
+ }
  // Tensor element-wise absolute
  abs() {
  return this.elementWiseSelfDAG((a) => Math.abs(a), (self, outGrad) => outGrad.mul(self.sign()));
  }
+ absolute = this.abs;
  // Tensor element-wise sign function
  sign() {
  return this.elementWiseSelfDAG((a) => Math.sign(a));
@@ -619,14 +649,22 @@ class Tensor {
  asin() {
  return this.elementWiseSelfDAG((a) => Math.asin(a), (self, outGrad) => outGrad.div(self.pow(2).neg().add(1).sqrt()));
  }
+ arcsin = this.asin;
  // Tensor element-wise acos
  acos() {
  return this.elementWiseSelfDAG((a) => Math.acos(a), (self, outGrad) => outGrad.div(self.pow(2).neg().add(1).sqrt()).neg());
  }
+ arccos = this.acos;
  // Tensor element-wise atan
  atan() {
  return this.elementWiseSelfDAG((a) => Math.atan(a), (self, outGrad) => outGrad.div(self.pow(2).add(1)));
  }
+ arctan = this.atan;
+ // Tensor element-wise atan2
+ atan2(other) {
+ return this.elementWiseABDAG(other, (a, b) => Math.atan2(a, b), (self, other, outGrad) => outGrad.mul(other.div(self.square().add(other.square()))), (self, other, outGrad) => outGrad.mul(self.neg().div(self.square().add(other.square()))));
+ }
+ arctan2 = this.atan2;
  // Tensor element-wise sinh
  sinh() {
  return this.elementWiseSelfDAG((a) => Math.sinh(a), (self, outGrad) => outGrad.mul(self.cosh()));
@@ -639,22 +677,45 @@ class Tensor {
  asinh() {
  return this.elementWiseSelfDAG((a) => Math.asinh(a), (self, outGrad) => outGrad.div(self.pow(2).add(1).sqrt()));
  }
+ arcsinh = this.asinh;
  // Tensor element-wise acosh
  acosh() {
  return this.elementWiseSelfDAG((a) => Math.acosh(a), (self, outGrad) => outGrad.div(self.add(1).sqrt().mul(self.sub(1).sqrt())));
  }
+ arccosh = this.acosh;
  // Tensor element-wise atanh
  atanh() {
  return this.elementWiseSelfDAG((a) => Math.atanh(a), (self, outGrad) => outGrad.div(self.pow(2).neg().add(1)));
  }
+ arctanh = this.atanh;
+ // Tensor element-wise degree to radian
+ deg2rad() {
+ return this.elementWiseSelfDAG((a) => a * (Math.PI / 180), (self, outGrad) => outGrad.mul(Math.PI / 180));
+ }
+ // Tensor element-wise radian to degree
+ rad2deg() {
+ return this.elementWiseSelfDAG((a) => a / (Math.PI / 180), (self, outGrad) => outGrad.div(Math.PI / 180));
+ }
  // Tensor element-wise square root
  sqrt() {
  return this.elementWiseSelfDAG((a) => Math.sqrt(a), (self, outGrad) => outGrad.div(self.sqrt().mul(2)));
  }
+ // Tensor element-wise reciprocal of square root
+ rsqrt() {
+ return this.elementWiseSelfDAG((a) => 1 / Math.sqrt(a), (self, outGrad) => outGrad.mul(self.pow(-1.5).mul(-0.5)));
+ }
  // Tensor element-wise e^x
  exp() {
  return this.elementWiseSelfDAG((a) => Math.exp(a), (self, outGrad) => outGrad.mul(self.exp()));
  }
+ // Tensor element-wise 2^x
+ exp2() {
+ return this.elementWiseSelfDAG((a) => 2 ** a, (self, outGrad) => outGrad.mul(self.exp2().mul(Math.log(2))));
+ }
+ // Tensor element-wise e^x - 1
+ expm1() {
+ return this.elementWiseSelfDAG((a) => Math.expm1(a), (self, outGrad) => outGrad.mul(self.exp()));
+ }
  // Tensor element-wise natural log
  log() {
  return this.elementWiseSelfDAG((a) => Math.log(a), (self, outGrad) => outGrad.div(self));
@@ -686,6 +747,32 @@ class Tensor {
  tanh() {
  return this.elementWiseSelfDAG((a) => Math.tanh(a), (self, outGrad) => outGrad.mul(self.tanh().pow(2).neg().add(1)));
  }
+ // Tensor element-wise round
+ round() {
+ return this.elementWiseSelfDAG((a) => Math.round(a));
+ }
+ // Tensor element-wise floor
+ floor() {
+ return this.elementWiseSelfDAG((a) => Math.floor(a));
+ }
+ // Tensor element-wise ceil
+ ceil() {
+ return this.elementWiseSelfDAG((a) => Math.ceil(a));
+ }
+ // Tensor element-wise truncation
+ trunc() {
+ return this.elementWiseSelfDAG((a) => Math.trunc(a));
+ }
+ fix = this.trunc;
+ // Tensor element-wise fraction portion
+ frac() {
+ return this.elementWiseSelfDAG((a) => a - Math.floor(a));
+ }
+ // Tensor element-wise clip and clamp
+ clip(min, max) {
+ return this.elementWiseSelfDAG((a) => Math.max(min, Math.min(max, a)), (self, outGrad) => outGrad.mul(self.ge(min).mul(self.le(max))));
+ }
+ clamp = this.clip;
  // Transpose
  transpose(dim1, dim2) {
  // If dimension out of bound, throw error
@@ -713,6 +800,8 @@ class Tensor {
  }
  return out;
  }
+ swapaxes = this.transpose;
+ swapdims = this.transpose;
  // Transpose 2D
  t() {
  // Verify matrix shape
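
The indexToCoords change near the top of this file drops the shape parameter: with strides alone, each coordinate is the quotient of the running remainder by that dimension's stride, taken left to right. The removed sort had handled arbitrary stride orderings explicitly; the straight loop gives the same result when strides already run from largest to smallest, as they do for contiguous row-major tensors. A standalone sketch of the same decomposition, with a small worked example:

    // Mirrors the revised static method: divide by each stride in turn,
    // keeping the remainder for the next dimension.
    function indexToCoords(index: number, strides: readonly number[]): number[] {
        const coords: number[] = new Array(strides.length);
        let remaining = index;
        for (let dim = 0; dim < strides.length; dim++) {
            coords[dim] = Math.floor(remaining / strides[dim]);
            remaining %= strides[dim];
        }
        return coords;
    }

    // For a contiguous shape [2, 3] the strides are [3, 1],
    // so flat index 4 maps to coordinates [1, 1].
    indexToCoords(4, [3, 1]); // [1, 1]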
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "catniff",
- "version": "0.2.8",
+ "version": "0.2.10",
  "description": "A cute autograd engine for Javascript",
  "main": "index.js",
  "scripts": {
@@ -26,7 +26,9 @@
  "neural-network",
  "machine-learning",
  "deep-learning",
- "micrograd"
+ "micrograd",
+ "torch",
+ "pytorch"
  ],
  "author": "nguyenphuminh",
  "license": "GPL-3.0",