catniff 0.1.10 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +39 -36
- package/dist/core.d.ts +88 -0
- package/dist/core.js +760 -0
- package/index.js +1 -2
- package/package.json +1 -1
- package/dist/autograd.d.ts +0 -112
- package/dist/autograd.js +0 -547
- package/dist/tensor.d.ts +0 -62
- package/dist/tensor.js +0 -336
package/README.md
CHANGED
@@ -1,6 +1,6 @@
 # Catniff
 
-Catniff is an experimental tensor ops library and autograd engine
+Catniff is an experimental tensor ops library and autograd engine made to be Torch-like (its name is a play on "catnip" and "differentiation"). This project is heavily under development currently, so keep in mind that APIs can be completely unstable and backwards-incompatible.
 
 ## Setup
 
@@ -13,57 +13,55 @@ npm install catniff
 
 Here is a little demo of a quadratic function:
 ```js
-const {
+const { Tensor } = require("catniff");
 
-const x = new
+const x = new Tensor(2, { requiresGrad: true });
 const L = x.pow(2).add(x); // x^2 + x
 
 L.backward();
-
+
+console.log(x.grad.val()); // 5
 ```
 
 View all examples in [`./examples`](./examples).
 
 ## Tensors
 
-Tensors in Catniff
-
-There is a built-in `TensorMath` class to help with tensor arithmetic, for example:
+Tensors in Catniff can be created by passing in a number or an nD array, and there are built-in methods that can be used to perform tensor arithmetic:
 ```js
-const {
+const { Tensor } = require("catniff");
 
-
-const
-
-```
-
-If you want to be concise, you can use `TM` or `TMath`:
-```js
-const { TM, TMath } = require("catniff");
+// Tensor init
+const A = new Tensor([ 1, 2, 3 ]);
+const B = new Tensor(3);
 
-
-
-console.log(TM.add(A, B));
-console.log(TMath.add(A, B));
+// Tensor addition (.val() returns the raw value rather than the tensor object)
+console.log(A.add(B).val());
 ```
 
-All available APIs are in `./src/
+All available APIs are in `./src/core.ts`.
 
 ## Autograd
 
-To compute the gradient wrt
+To compute the gradient wrt multiple variables of our mathematical expression, we can simply set `requiresGrad` to `true`:
 ```js
-const {
-
-const X = new
-[
-
-]
-
-
-
-
-
+const { Tensor } = require("catniff");
+
+const X = new Tensor(
+    [
+        [ 0.5, -1.0 ],
+        [ 2.0, 0.0 ]
+    ],
+    { requiresGrad: true }
+);
+
+const Y = new Tensor(
+    [
+        [ 1.0, -2.0 ],
+        [ 0.5, 1.5 ]
+    ],
+    { requiresGrad: true }
+);
 
 const D = X.sub(Y);
 const E = D.exp();
@@ -72,14 +70,19 @@ const G = F.log();
 
 G.backward();
 
-
+// X.grad and Y.grad are tensor objects themselves, so we call .val() here to see their raw values
+console.log(X.grad.val(), Y.grad.val());
 ```
 
-All available APIs are in `./src/
+All available APIs are in `./src/core.ts`.
+
+## Documentation
+
+Todo :/
 
 ## Todos
 
-I'm mostly just learning and playing with this currently, so there are no concrete plans yet, but here
+I'm mostly just learning and playing with this currently, so there are no concrete plans yet, but here is what I currently have in mind:
 
 * Fix whatever is the problem right now (there are a lot of problems right now lol).
 * Add more tensor ops.
package/dist/core.d.ts
ADDED
@@ -0,0 +1,88 @@
+export type TensorValue = number | TensorValue[];
+export interface TensorOptions {
+    shape?: number[];
+    strides?: number[];
+    grad?: Tensor;
+    requiresGrad?: boolean;
+    gradFn?: Function;
+    children?: Tensor[];
+}
+export declare class Tensor {
+    value: number[] | number;
+    shape: number[];
+    strides: number[];
+    grad?: Tensor;
+    requiresGrad: boolean;
+    gradFn: Function;
+    children: Tensor[];
+    constructor(value: TensorValue, options?: TensorOptions);
+    static flatten(tensor: TensorValue): number[] | number;
+    static getShape(tensor: TensorValue): number[];
+    static getStrides(shape: number[]): number[];
+    static padShape(stridesA: number[], stridesB: number[], shapeA: number[], shapeB: number[]): number[][];
+    static broadcastShapes(shapeA: number[], shapeB: number[]): number[];
+    static indexToCoords(index: number, shape: number[], strides: number[]): number[];
+    static coordsToIndex(coords: number[], shape: number[], strides: number[]): number;
+    static elementWiseAB(tA: Tensor, tB: Tensor, op: (tA: number, tB: number) => number): Tensor;
+    static elementWiseSelf(tA: Tensor, op: (tA: number) => number): Tensor;
+    elementWiseABDAG(other: TensorValue | Tensor, op: (a: number, b: number) => number, thisGrad?: (self: Tensor, other: Tensor, outGrad: Tensor) => Tensor, otherGrad?: (self: Tensor, other: Tensor, outGrad: Tensor) => Tensor): Tensor;
+    elementWiseSelfDAG(op: (a: number) => number, thisGrad?: (self: Tensor, outGrad: Tensor) => Tensor): Tensor;
+    static forceTensor(value: TensorValue | Tensor): Tensor;
+    static addGrad(tensor: Tensor, accumGrad: Tensor): void;
+    squeeze(dims?: number[] | number): Tensor;
+    unsqueeze(dim: number): Tensor;
+    sum(dims?: number[] | number, keepDims?: boolean): Tensor;
+    add(other: TensorValue | Tensor): Tensor;
+    sub(other: TensorValue | Tensor): Tensor;
+    mul(other: TensorValue | Tensor): Tensor;
+    pow(other: TensorValue | Tensor): Tensor;
+    div(other: TensorValue | Tensor): Tensor;
+    ge(other: TensorValue | Tensor): Tensor;
+    le(other: TensorValue | Tensor): Tensor;
+    gt(other: TensorValue | Tensor): Tensor;
+    lt(other: TensorValue | Tensor): Tensor;
+    eq(other: TensorValue | Tensor): Tensor;
+    logicalAnd(other: TensorValue | Tensor): Tensor;
+    logicalOr(other: TensorValue | Tensor): Tensor;
+    logicalXor(other: TensorValue | Tensor): Tensor;
+    logicalNot(): Tensor;
+    bitwiseAnd(other: TensorValue | Tensor): Tensor;
+    bitwiseOr(other: TensorValue | Tensor): Tensor;
+    bitwiseXor(other: TensorValue | Tensor): Tensor;
+    bitwiseNot(): Tensor;
+    bitwiseLeftShift(other: TensorValue | Tensor): Tensor;
+    bitwiseRightShift(other: TensorValue | Tensor): Tensor;
+    neg(): Tensor;
+    abs(): Tensor;
+    sign(): Tensor;
+    sin(): Tensor;
+    cos(): Tensor;
+    tan(): Tensor;
+    asin(): Tensor;
+    acos(): Tensor;
+    atan(): Tensor;
+    sinh(): Tensor;
+    cosh(): Tensor;
+    asinh(): Tensor;
+    acosh(): Tensor;
+    atanh(): Tensor;
+    sqrt(): Tensor;
+    exp(): Tensor;
+    log(): Tensor;
+    log2(): Tensor;
+    log10(): Tensor;
+    log1p(): Tensor;
+    relu(): Tensor;
+    sigmoid(): Tensor;
+    tanh(): Tensor;
+    transpose(dim1: number, dim2: number): Tensor;
+    t(): Tensor;
+    dot(other: TensorValue | Tensor): Tensor;
+    mm(other: TensorValue | Tensor): Tensor;
+    mv(other: TensorValue | Tensor): Tensor;
+    matmul(other: TensorValue | Tensor): Tensor;
+    static fullLike(tensor: Tensor, num: number, options?: TensorOptions): Tensor;
+    backward(): void;
+    val(): any;
+    withGrad(requiresGrad: boolean): Tensor;
+}
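Since `core.d.ts` is the entire new public surface, a short sketch of how the declared signatures compose may help. The method names and arities below come straight from the typings; the commented results are assumptions based on the Torch-like semantics the README claims, not guarantees from the types:

```js
const { Tensor } = require("catniff");

// Elementwise ops accept raw values or tensors (other: TensorValue | Tensor)
const M = new Tensor([ [ 1, 2 ], [ 3, 4 ] ], { requiresGrad: true });
const S = M.mul([ 10, 20 ]); // presumably broadcasts the vector across rows
console.log(S.val());

// matmul likewise takes a raw nested array
const P = M.matmul([ [ 1, 0 ], [ 0, 1 ] ]);

// sum() with no dims should reduce to a scalar, giving backward()
// a scalar root (assuming it wants one, as in Torch)
P.sum().backward();
console.log(M.grad.val());

// withGrad(requiresGrad) reads like a convenience for toggling gradient tracking
const frozen = M.withGrad(false);
console.log(frozen.requiresGrad); // false
```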