catniff 0.1.9 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # Catniff
2
2
 
3
- Catniff is a small and experimental tensor library and autograd engine inspired by [micrograd](https://github.com/karpathy/micrograd). The name is a play on "catnip" and "differentiation".
3
+ Catniff is an experimental tensor ops library and autograd engine made to be Torch-like (its name is a play on "catnip" and "differentiation"). This project is currently under heavy development, so keep in mind that APIs can be completely unstable and backwards-incompatible.
4
4
 
5
5
  ## Setup
6
6
 
@@ -13,45 +13,55 @@ npm install catniff
13
13
 
14
14
  Here is a little demo of a quadratic function:
15
15
  ```js
16
- const { Node } = require("catniff");
16
+ const { Tensor } = require("catniff");
17
17
 
18
- const x = new Node(2);
18
+ const x = new Tensor(2, { requiresGrad: true });
19
19
  const L = x.pow(2).add(x); // x^2 + x
20
20
 
21
21
  L.backward();
22
- console.log(x.grad); // 5
22
+
23
+ console.log(x.grad.val()); // 5
23
24
  ```
24
25
 
25
- ## Tensors
26
+ View all examples in [`./examples`](./examples).
26
27
 
27
- Tensors in Catniff are either numbers (scalars/0-D tensors) or multidimensional number arrays (n-D tensors).
28
+ ## Tensors
28
29
 
29
- There is a built-in `TensorMath` class to help with tensor arithmetic, for example:
30
+ Tensors in Catniff can be created by passing in a number or an nD array, and there are built-in methods that can be used to perform tensor arithmetic:
30
31
  ```js
31
- const { TensorMath } = require("catniff");
32
+ const { Tensor } = require("catniff");
33
+
34
+ // Tensor init
35
+ const A = new Tensor([ 1, 2, 3 ]);
36
+ const B = new Tensor(3);
32
37
 
33
- const A = [ 1, 2, 3 ];
34
- const B = 3;
35
- console.log(TensorMath.add(A, B));
38
+ // Tensor addition (.val() returns the raw value rather than the tensor object)
39
+ console.log(A.add(B).val());
36
40
  ```
37
41
 
38
- All available APIs are in `./src/tensor.ts`.
42
+ All available APIs are in `./src/core.ts`.
39
43
 
40
44
  ## Autograd
41
45
 
42
- To compute the gradient of our mathematical expression, we use the `Node` class to dynamically build our DAG:
46
+ To compute the gradient of our mathematical expression with respect to multiple variables, we can simply set `requiresGrad` to `true`:
43
47
  ```js
44
- const { Node } = require("catniff");
45
-
46
- const X = new Node([
47
- [ 0.5, -1.0 ],
48
- [ 2.0, 0.0 ]
49
- ]);
50
-
51
- const Y = new Node([
52
- [ 1.0, -2.0 ],
53
- [ 0.5, 1.5 ]
54
- ]);
48
+ const { Tensor } = require("catniff");
49
+
50
+ const X = new Tensor(
51
+ [
52
+ [ 0.5, -1.0 ],
53
+ [ 2.0, 0.0 ]
54
+ ],
55
+ { requiresGrad: true }
56
+ );
57
+
58
+ const Y = new Tensor(
59
+ [
60
+ [ 1.0, -2.0 ],
61
+ [ 0.5, 1.5 ]
62
+ ],
63
+ { requiresGrad: true }
64
+ );
55
65
 
56
66
  const D = X.sub(Y);
57
67
  const E = D.exp();
@@ -60,14 +70,19 @@ const G = F.log();
60
70
 
61
71
  G.backward();
62
72
 
63
- console.log(X.grad, Y.grad);
73
+ // X.grad and Y.grad are tensor objects themselves, so we call .val() here to see their raw values
74
+ console.log(X.grad.val(), Y.grad.val());
64
75
  ```
65
76
 
66
- All available APIs are in `./src/autograd.ts`.
77
+ All available APIs are in `./src/core.ts`.
78
+
79
+ ## Documentation
80
+
81
+ Todo :/
67
82
 
68
83
  ## Todos
69
84
 
70
- I'm mostly just learning and playing with this currently, so there are no concrete plans yet, but here are what I currently have in mind:
85
+ I'm mostly just learning and playing with this currently, so there are no concrete plans yet, but here is what I currently have in mind:
71
86
 
72
87
  * Fix whatever is the problem right now (there are a lot of problems right now lol).
73
88
  * Add more tensor ops.
package/dist/core.d.ts ADDED
@@ -0,0 +1,88 @@
1
+ export type TensorValue = number | TensorValue[];
2
+ export interface TensorOptions {
3
+ shape?: number[];
4
+ strides?: number[];
5
+ grad?: Tensor;
6
+ requiresGrad?: boolean;
7
+ gradFn?: Function;
8
+ children?: Tensor[];
9
+ }
10
+ export declare class Tensor {
11
+ value: number[] | number;
12
+ shape: number[];
13
+ strides: number[];
14
+ grad?: Tensor;
15
+ requiresGrad: boolean;
16
+ gradFn: Function;
17
+ children: Tensor[];
18
+ constructor(value: TensorValue, options?: TensorOptions);
19
+ static flatten(tensor: TensorValue): number[] | number;
20
+ static getShape(tensor: TensorValue): number[];
21
+ static getStrides(shape: number[]): number[];
22
+ static padDims(shapeA: number[], shapeB: number[]): number[][];
23
+ static broadcastShapes(shapeA: number[], shapeB: number[]): number[];
24
+ static indexToCoords(index: number, shape: number[], strides: number[]): number[];
25
+ static coordsToIndex(coords: number[], shape: number[], strides: number[]): number;
26
+ static elementWiseAB(tA: Tensor, tB: Tensor, op: (tA: number, tB: number) => number): Tensor;
27
+ static elementWiseSelf(tA: Tensor, op: (tA: number) => number): Tensor;
28
+ elementWiseABDAG(other: TensorValue | Tensor, op: (a: number, b: number) => number, thisGrad: (self: Tensor, other: Tensor, outGrad: Tensor) => void, otherGrad: (self: Tensor, other: Tensor, outGrad: Tensor) => void): Tensor;
29
+ elementWiseSelfDAG(op: (a: number) => number, thisGrad: (self: Tensor, outGrad: Tensor) => void): Tensor;
30
+ static forceTensor(value: TensorValue | Tensor): Tensor;
31
+ static addGrad(tensor: Tensor, accumGrad: Tensor): void;
32
+ squeeze(dims?: number[] | number): Tensor;
33
+ unsqueeze(dim: number): Tensor;
34
+ sum(dims?: number[] | number, keepDims?: boolean): Tensor;
35
+ add(other: TensorValue | Tensor): Tensor;
36
+ sub(other: TensorValue | Tensor): Tensor;
37
+ mul(other: TensorValue | Tensor): Tensor;
38
+ pow(other: TensorValue | Tensor): Tensor;
39
+ div(other: TensorValue | Tensor): Tensor;
40
+ ge(other: TensorValue | Tensor): Tensor;
41
+ le(other: TensorValue | Tensor): Tensor;
42
+ gt(other: TensorValue | Tensor): Tensor;
43
+ lt(other: TensorValue | Tensor): Tensor;
44
+ eq(other: TensorValue | Tensor): Tensor;
45
+ logicalAnd(other: TensorValue | Tensor): Tensor;
46
+ logicalOr(other: TensorValue | Tensor): Tensor;
47
+ logicalXor(other: TensorValue | Tensor): Tensor;
48
+ logicalNot(): Tensor;
49
+ bitwiseAnd(other: TensorValue | Tensor): Tensor;
50
+ bitwiseOr(other: TensorValue | Tensor): Tensor;
51
+ bitwiseXor(other: TensorValue | Tensor): Tensor;
52
+ bitwiseNot(): Tensor;
53
+ bitwiseLeftShift(other: TensorValue | Tensor): Tensor;
54
+ bitwiseRightShift(other: TensorValue | Tensor): Tensor;
55
+ neg(): Tensor;
56
+ abs(): Tensor;
57
+ sign(): Tensor;
58
+ sin(): Tensor;
59
+ cos(): Tensor;
60
+ tan(): Tensor;
61
+ asin(): Tensor;
62
+ acos(): Tensor;
63
+ atan(): Tensor;
64
+ sinh(): Tensor;
65
+ cosh(): Tensor;
66
+ asinh(): Tensor;
67
+ acosh(): Tensor;
68
+ atanh(): Tensor;
69
+ sqrt(): Tensor;
70
+ exp(): Tensor;
71
+ log(): Tensor;
72
+ log2(): Tensor;
73
+ log10(): Tensor;
74
+ log1p(): Tensor;
75
+ relu(): Tensor;
76
+ sigmoid(): Tensor;
77
+ tanh(): Tensor;
78
+ transpose(dim1: number, dim2: number): Tensor;
79
+ t(): Tensor;
80
+ dot(other: TensorValue | Tensor): Tensor;
81
+ mm(other: TensorValue | Tensor): Tensor;
82
+ mv(other: TensorValue | Tensor): Tensor;
83
+ matmul(other: TensorValue | Tensor): Tensor;
84
+ static fullLike(tensor: Tensor, num: number, options?: TensorOptions): Tensor;
85
+ backward(): void;
86
+ val(): any;
87
+ withGrad(requiresGrad: boolean): Tensor;
88
+ }