catniff 0.6.10 → 0.6.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -132,6 +132,7 @@ All available APIs are in [`./src/`](./src/) if you want to dig deeper.
 
 * [Shakespeare-style text generator](https://github.com/nguyenphuminh/shakespeare-lm).
 * [Simple neural net for XOR calculation](./examples/xornet.js).
+* [N-th order derivative calculation](./examples/nthorder.js).
 * [Tensors](./examples/tensors.js).
 * [Optimizer](./examples/optim.js).
 * [Simple quadratic equation](./examples/quadratic.js).
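The new examples/nthorder.js entry pairs with the Tensor.createGraph flag added in this release (see dist/core.js below). A rough sketch of the idea, not the actual example file, assuming Tensor is exported from the package root and that the gradient method carrying the zeroGrad option is named backward():

```js
const { Tensor } = require("catniff");

// With createGraph enabled, gradients computed during the backward pass are
// themselves recorded on the graph, so they can be differentiated again.
Tensor.createGraph = true;

const x = new Tensor(3, { requiresGrad: true });
const y = x.mul(x).mul(x); // y = x^3

y.backward();              // assumed method name; populates x.grad
const dydx = x.grad;       // 3x^2 = 27, still attached to the graph
// Running a second backward pass from dydx would give d2y/dx2 = 6x = 18.
```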
package/dist/core.d.ts CHANGED
@@ -23,6 +23,8 @@ export declare class Tensor {
     children: Tensor[];
     device: string;
     static training: boolean;
+    static noGrad: boolean;
+    static createGraph: boolean;
     constructor(value: TensorValue, options?: TensorOptions);
     static flattenValue(tensor: TensorValue): number[] | number;
     static getShape(tensor: TensorValue): number[];
@@ -160,6 +162,9 @@ export declare class Tensor {
     log1p(): Tensor;
     relu(): Tensor;
     leakyRelu(negativeSlope?: number): Tensor;
+    elu(alpha?: number): Tensor;
+    selu(): Tensor;
+    celu(alpha?: number): Tensor;
     sigmoid(): Tensor;
     tanh(): Tensor;
     softplus(): Tensor;
@@ -211,7 +216,6 @@ export declare class Tensor {
         zeroGrad?: boolean;
     }): void;
     val(): TensorValue;
-    withGrad(requiresGrad: boolean): Tensor;
     detach(): Tensor;
     clone(): Tensor;
     replace(other: Tensor | TensorValue, allowShapeMismatch?: boolean): Tensor;
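The three new activation declarations follow the usual ELU/SELU/CELU definitions; the formulas appear in dist/core.js further down. A small usage sketch, assuming Tensor is exported from the package root, with expected values computed from those formulas:

```js
const { Tensor } = require("catniff");

const t = new Tensor([-1, 0, 2]);

t.elu().val();     // [expm1(-1) ≈ -0.632, 0, 2]
t.selu().val();    // [≈ -1.111, 0, ≈ 2.101]
t.celu(0.5).val(); // [0.5 * expm1(-2) ≈ -0.432, 0, 2]
```

The withGrad(requiresGrad) method is removed without a direct replacement in this diff; detach() remains for the requiresGrad: false case.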
package/dist/core.js CHANGED
@@ -14,6 +14,8 @@ class Tensor {
     children;
     device;
     static training = false;
+    static noGrad = false;
+    static createGraph = false;
     constructor(value, options = {}) {
         // Storage
         this.value = Tensor.flattenValue(value);
@@ -210,12 +212,12 @@ class Tensor {
             out.gradFn = () => {
                 // Disable gradient collecting of gradients themselves
                 const outGrad = out.grad;
-                const selfNoGrad = this.detach();
-                const otherNoGrad = other.detach();
+                const selfWithGrad = Tensor.createGraph ? this : this.detach();
+                const otherWithGrad = Tensor.createGraph ? other : other.detach();
                 if (this.requiresGrad)
-                    Tensor.addGrad(this, thisGrad(selfNoGrad, otherNoGrad, outGrad));
+                    Tensor.addGrad(this, thisGrad(selfWithGrad, otherWithGrad, outGrad));
                 if (other.requiresGrad)
-                    Tensor.addGrad(other, otherGrad(selfNoGrad, otherNoGrad, outGrad));
+                    Tensor.addGrad(other, otherGrad(selfWithGrad, otherWithGrad, outGrad));
             };
         }
         return out;
@@ -231,9 +233,9 @@ class Tensor {
             out.gradFn = () => {
                 // Disable gradient collecting of gradients themselves
                 const outGrad = out.grad;
-                const selfNoGrad = this.detach();
+                const selfWithGrad = Tensor.createGraph ? this : this.detach();
                 if (this.requiresGrad)
-                    Tensor.addGrad(this, thisGrad(selfNoGrad, outGrad));
+                    Tensor.addGrad(this, thisGrad(selfWithGrad, outGrad));
             };
         }
         return out;
@@ -1170,6 +1172,26 @@ class Tensor {
             return outGrad.mul(self.gt(0).add(self.le(0).mul(negativeSlope)));
         });
     }
+    // Tensor element-wise elu
+    elu(alpha = 1) {
+        return this.elementWiseSelfDAG((a) => a > 0 ? a : alpha * (Math.expm1(a)), (self, outGrad) => {
+            return outGrad.mul(self.gt(0).add(self.le(0).mul(self.exp().mul(alpha))));
+        });
+    }
+    // Tensor element-wise selu
+    selu() {
+        const alpha = 1.6732632423543772848170429916717;
+        const scale = 1.0507009873554804934193349852946;
+        return this.elementWiseSelfDAG((a) => scale * (a >= 0 ? a : alpha * Math.expm1(a)), (self, outGrad) => {
+            return outGrad.mul(self.gt(0).mul(scale).add(self.le(0).mul(self.exp().mul(alpha * scale))));
+        });
+    }
+    // Tensor element-wise celu
+    celu(alpha = 1) {
+        return this.elementWiseSelfDAG((a) => a >= 0 ? a : alpha * (Math.expm1(a / alpha)), (self, outGrad) => {
+            return outGrad.mul(self.gt(0).add(self.le(0).mul(self.div(alpha).exp())));
+        });
+    }
     // Tensor element-wise sigmoid
     sigmoid() {
         return this.elementWiseSelfDAG((a) => 1 / (1 + Math.exp(-a)), (self, outGrad) => {
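For reference, the backward expression in the elu hunk can be reproduced by hand with the tensor ops it uses (gt, le, add, mul, exp), all visible elsewhere in this file. With alpha = 1 the derivative is 1 for x > 0 and e^x for x <= 0; assuming Tensor is exported from the package root:

```js
const { Tensor } = require("catniff");

// Hand-evaluating the elu gradient expression from the hunk above (alpha = 1)
const x = new Tensor([-1, 0.5]);
const upstream = new Tensor([1, 1]);
const grad = upstream.mul(x.gt(0).add(x.le(0).mul(x.exp())));
grad.val(); // ≈ [0.368, 1]
```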
@@ -1333,12 +1355,12 @@ class Tensor {
             out.gradFn = () => {
                 // Disable gradient collecting of gradients themselves
                 const outGrad = out.grad;
-                const selfNoGrad = this.detach();
-                const otherNoGrad = other.detach();
+                const selfWithGrad = Tensor.createGraph ? this : this.detach();
+                const otherWithGrad = Tensor.createGraph ? other : other.detach();
                 if (this.requiresGrad)
-                    Tensor.addGrad(this, outGrad.mm(otherNoGrad.t()));
+                    Tensor.addGrad(this, outGrad.mm(otherWithGrad.t()));
                 if (other.requiresGrad)
-                    Tensor.addGrad(other, selfNoGrad.t().mm(outGrad));
+                    Tensor.addGrad(other, selfWithGrad.t().mm(outGrad));
             };
         }
         return out;
@@ -1391,12 +1413,12 @@ class Tensor {
             out.gradFn = () => {
                 // Disable gradient collecting of gradients themselves
                 const outGrad = out.grad;
-                const selfNoGrad = this.detach();
-                const otherNoGrad = other.detach();
+                const selfWithGrad = Tensor.createGraph ? this : this.detach();
+                const otherWithGrad = Tensor.createGraph ? other : other.detach();
                 if (this.requiresGrad)
-                    Tensor.addGrad(this, outGrad.bmm(otherNoGrad.transpose(1, 2)));
+                    Tensor.addGrad(this, outGrad.bmm(otherWithGrad.transpose(1, 2)));
                 if (other.requiresGrad)
-                    Tensor.addGrad(other, selfNoGrad.transpose(1, 2).bmm(outGrad));
+                    Tensor.addGrad(other, selfWithGrad.transpose(1, 2).bmm(outGrad));
             };
         }
         return out;
@@ -1491,12 +1513,12 @@ class Tensor {
             out.gradFn = () => {
                 other = other;
                 const outGrad = out.grad;
-                const selfNoGrad = self.detach();
-                const otherNoGrad = other.detach();
+                const selfWithGrad = Tensor.createGraph ? self : self.detach();
+                const otherWithGrad = Tensor.createGraph ? other : other.detach();
                 if (this.requiresGrad)
-                    Tensor.addGrad(this, outGrad.matmul(otherNoGrad.transpose(-2, -1)));
+                    Tensor.addGrad(this, outGrad.matmul(otherWithGrad.transpose(-2, -1)));
                 if (other.requiresGrad)
-                    Tensor.addGrad(other, selfNoGrad.transpose(-2, -1).matmul(outGrad));
+                    Tensor.addGrad(other, selfWithGrad.transpose(-2, -1).matmul(outGrad));
             };
         }
         return out;
@@ -1781,7 +1803,7 @@ class Tensor {
         const visited = new Set();
         function build(node) {
             // Only collects unvisited node and node that requires gradient
-            if (!visited.has(node) && node.requiresGrad) {
+            if (!visited.has(node) && node.requiresGrad && !Tensor.noGrad) {
                 visited.add(node);
                 // Reset grad to zeros if specified
                 if (zeroGrad) {
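Based on this hunk, setting Tensor.noGrad makes the backward graph collection skip every node, so no gradFn runs and no gradients are written. A minimal sketch, again assuming the method carrying the zeroGrad option (see core.d.ts above) is named backward():

```js
const { Tensor } = require("catniff");

const x = new Tensor(2, { requiresGrad: true });
const y = x.mul(x);

Tensor.noGrad = true;
y.backward();          // assumed method name; with noGrad set, x.grad stays untouched
Tensor.noGrad = false;
```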
@@ -1821,17 +1843,6 @@ class Tensor {
         }
         return buildNested(this.value, this.shape, this.strides, this.offset);
     }
-    // Returns a view of the tensor with gradient turned on/off and detaches from autograd
-    withGrad(requiresGrad) {
-        return new Tensor(this.value, {
-            shape: this.shape,
-            strides: this.strides,
-            offset: this.offset,
-            numel: this.numel,
-            device: this.device,
-            requiresGrad
-        });
-    }
     // Returns a view of the tensor with gradient turned off and detaches from autograd
     detach() {
         return new Tensor(this.value, {
@@ -1843,15 +1854,18 @@ class Tensor {
             requiresGrad: false
         });
     }
-    // Returns a copy of the tensor (with new data allocation) and detaches from autograd
+    // Returns a copy of the tensor (with new data allocation) and keeps grad connection
     clone() {
-        return new Tensor(typeof this.value === "number" ? this.value : [...this.value], {
-            shape: this.shape,
-            strides: this.strides,
-            offset: this.offset,
-            numel: this.numel,
-            requiresGrad: this.requiresGrad
-        });
+        const newValue = typeof this.value === "number" ? this.value : [...this.value];
+        const out = new Tensor(newValue);
+        if (this.requiresGrad) {
+            out.requiresGrad = true;
+            out.children.push(this);
+            out.gradFn = () => {
+                Tensor.addGrad(this, out.grad);
+            };
+        }
+        return out;
     }
     // Returns this tensor with value replaced with the value of another tensor
     replace(other, allowShapeMismatch = false) {
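clone() now stays connected to autograd: it allocates new storage but registers the source tensor as a child and forwards the output gradient back unchanged. A short sketch of the new behavior, assuming Tensor is exported from the package root:

```js
const { Tensor } = require("catniff");

const a = new Tensor([1, 2], { requiresGrad: true });
const b = a.clone();   // new storage; b.children includes a
// After a backward pass through b, a.grad receives b.grad as-is,
// whereas detach() still returns a view with requiresGrad: false.
```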
package/dist/optim.d.ts CHANGED
@@ -55,10 +55,10 @@ declare class AdamW extends BaseOptimizer {
     constructor(params: Tensor[], options?: AdamWOptions);
     step(): void;
 }
-export declare class Optim {
-    static BaseOptimizer: typeof BaseOptimizer;
-    static SGD: typeof SGD;
-    static Adam: typeof Adam;
-    static AdamW: typeof AdamW;
-}
+export declare const Optim: {
+    BaseOptimizer: typeof BaseOptimizer;
+    SGD: typeof SGD;
+    Adam: typeof Adam;
+    AdamW: typeof AdamW;
+};
 export {};
package/dist/optim.js CHANGED
@@ -184,10 +184,9 @@ class AdamW extends BaseOptimizer {
         }
     }
 }
-class Optim {
-    static BaseOptimizer = BaseOptimizer;
-    static SGD = SGD;
-    static Adam = Adam;
-    static AdamW = AdamW;
-}
-exports.Optim = Optim;
+exports.Optim = {
+    BaseOptimizer,
+    SGD,
+    Adam,
+    AdamW
+};
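Optim is now exported as a plain object rather than a class with static members, so consumer code that reads the optimizers off Optim keeps working. A sketch, assuming Optim and Tensor are re-exported from the package root and treating the option name as illustrative:

```js
const { Tensor, Optim } = require("catniff");

const params = [new Tensor([0.5, -0.3], { requiresGrad: true })];
// The "lr" option name is assumed here; see optim.d.ts for the actual option shapes
const opt = new Optim.SGD(params, { lr: 0.01 });
opt.step();
```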
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "catniff",
-  "version": "0.6.10",
+  "version": "0.6.12",
   "description": "A small Torch-like deep learning framework for Javascript",
   "main": "index.js",
   "scripts": {