catniff 0.1.9 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/tensor.d.ts DELETED
@@ -1,60 +0,0 @@
- export type Tensor = number | Tensor[];
- export declare class TensorMath {
-     static create(num: number, shape: number[]): Tensor;
-     static getShape(tA: Tensor): number[];
-     static padShape(tA: Tensor, tB: Tensor): [Tensor[], Tensor[]];
-     static elementWiseAB(tA: Tensor, tB: Tensor, op: (tA: number, tB: number) => number): Tensor;
-     static elementWiseSelf(tA: Tensor, op: (tA: number) => number): Tensor;
-     static add(tA: Tensor, tB: Tensor): Tensor;
-     static sub(tA: Tensor, tB: Tensor): Tensor;
-     static mul(tA: Tensor, tB: Tensor): Tensor;
-     static pow(tA: Tensor, tB: Tensor): Tensor;
-     static div(tA: Tensor, tB: Tensor): Tensor;
-     static gt(tA: Tensor, tB: Tensor): Tensor;
-     static lt(tA: Tensor, tB: Tensor): Tensor;
-     static ge(tA: Tensor, tB: Tensor): Tensor;
-     static le(tA: Tensor, tB: Tensor): Tensor;
-     static eq(tA: Tensor, tB: Tensor): Tensor;
-     static logicalAnd(tA: Tensor, tB: Tensor): Tensor;
-     static logicalOr(tA: Tensor, tB: Tensor): Tensor;
-     static logicalXor(tA: Tensor, tB: Tensor): Tensor;
-     static logicalNot(tA: Tensor): Tensor;
-     static bitwiseAnd(tA: Tensor, tB: Tensor): Tensor;
-     static bitwiseOr(tA: Tensor, tB: Tensor): Tensor;
-     static bitwiseXor(tA: Tensor, tB: Tensor): Tensor;
-     static bitwiseNot(tA: Tensor): Tensor;
-     static bitwiseLeftShift(tA: Tensor, tB: Tensor): Tensor;
-     static bitwiseRightShift(tA: Tensor, tB: Tensor): Tensor;
-     static neg(tA: Tensor): Tensor;
-     static abs(tA: Tensor): Tensor;
-     static sign(tA: Tensor): Tensor;
-     static sin(tA: Tensor): Tensor;
-     static cos(tA: Tensor): Tensor;
-     static tan(tA: Tensor): Tensor;
-     static asin(tA: Tensor): Tensor;
-     static acos(tA: Tensor): Tensor;
-     static atan(tA: Tensor): Tensor;
-     static sinh(tA: Tensor): Tensor;
-     static cosh(tA: Tensor): Tensor;
-     static asinh(tA: Tensor): Tensor;
-     static acosh(tA: Tensor): Tensor;
-     static atanh(tA: Tensor): Tensor;
-     static sqrt(tA: Tensor): Tensor;
-     static exp(tA: Tensor): Tensor;
-     static log(tA: Tensor): Tensor;
-     static log2(tA: Tensor): Tensor;
-     static log10(tA: Tensor): Tensor;
-     static log1p(tA: Tensor): Tensor;
-     static relu(tA: Tensor): Tensor;
-     static sigmoid(tA: Tensor): Tensor;
-     static tanh(tA: Tensor): Tensor;
-     static squeezeAxis(tA: Tensor, axis: number): Tensor;
-     static squeeze(tA: Tensor, dims?: number[] | number): Tensor;
-     static sumAxis(tA: Tensor, axis: number): Tensor;
-     static sum(tA: Tensor, dims?: number[] | number, keepDims?: boolean): Tensor;
-     static t(tA: Tensor): Tensor;
-     static dot(tA: Tensor, tB: Tensor): Tensor;
-     static mm(tA: Tensor, tB: Tensor): Tensor;
-     static mv(tA: Tensor, tB: Tensor): Tensor;
-     static matmul(tA: Tensor, tB: Tensor): Tensor;
- }
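The declarations above describe the package's former public tensor surface: a Tensor is either a number or a nested array of tensors, and every operation is a static method on TensorMath. As a minimal usage sketch (the import path and all values below are illustrative assumptions, not taken from the package itself), the removed API could have been exercised like this:

    import { TensorMath, Tensor } from "catniff/dist/tensor"; // assumed path to the removed module

    // Element-wise ops broadcast a length-1 axis against a longer one,
    // so a 1x3 row is added to each row of a 2x3 matrix.
    const a: Tensor = [[1, 2, 3], [4, 5, 6]];
    const b: Tensor = [[10, 20, 30]];
    const sum = TensorMath.add(a, b); // [[11, 22, 33], [14, 25, 36]]

    // matmul dispatches on rank: 1D x 1D is a dot product, 2D x 2D a matrix product.
    const prod = TensorMath.matmul(a, TensorMath.t(a)); // [[14, 32], [32, 77]]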
package/dist/tensor.js DELETED
@@ -1,334 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.TensorMath = void 0;
- class TensorMath {
-     static create(num, shape) {
-         if (shape.length === 0) {
-             return num;
-         }
-         const [dim, ...rest] = shape;
-         const out = [];
-         for (let i = 0; i < dim; i++) {
-             out.push(TensorMath.create(num, rest));
-         }
-         return out;
-     }
-     static getShape(tA) {
-         const shape = [];
-         let subA = tA;
-         while (Array.isArray(subA)) {
-             shape.push(subA.length);
-             subA = subA[0];
-         }
-         return shape;
-     }
-     static padShape(tA, tB) {
-         let dimA = TensorMath.getShape(tA).length;
-         let dimB = TensorMath.getShape(tB).length;
-         while (dimA < dimB) {
-             dimA++;
-             tA = [tA];
-         }
-         while (dimA > dimB) {
-             dimB++;
-             tB = [tB];
-         }
-         return [tA, tB];
-     }
-     static elementWiseAB(tA, tB, op) {
-         if (typeof tA === "number" && typeof tB === "number") {
-             return op(tA, tB);
-         }
-         [tA, tB] = TensorMath.padShape(tA, tB);
-         const outLen = Math.max(tA.length, tB.length);
-         if (tA.length !== tB.length && tA.length !== 1 && tB.length !== 1) {
-             throw new Error("Inputs are incompatible tensors");
-         }
-         const result = [];
-         for (let i = 0; i < outLen; i++) {
-             const subA = tA[tA.length === 1 ? 0 : i];
-             const subB = tB[tB.length === 1 ? 0 : i];
-             result.push(TensorMath.elementWiseAB(subA, subB, op));
-         }
-         return result;
-     }
-     static elementWiseSelf(tA, op) {
-         if (typeof tA === "number") {
-             return op(tA);
-         }
-         else {
-             return tA.map(subA => TensorMath.elementWiseSelf(subA, op));
-         }
-     }
-     static add(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA + tB);
-     }
-     static sub(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA - tB);
-     }
-     static mul(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA * tB);
-     }
-     static pow(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA ** tB);
-     }
-     static div(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA / tB);
-     }
-     static gt(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA > tB ? 1 : 0);
-     }
-     static lt(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA < tB ? 1 : 0);
-     }
-     static ge(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA >= tB ? 1 : 0);
-     }
-     static le(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA <= tB ? 1 : 0);
-     }
-     static eq(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA === tB ? 1 : 0);
-     }
-     static logicalAnd(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA === 1 && tB === 1 ? 1 : 0);
-     }
-     static logicalOr(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA === 1 || tB === 1 ? 1 : 0);
-     }
-     static logicalXor(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => (tA === 1 || tB === 1) && tA !== tB ? 1 : 0);
-     }
-     static logicalNot(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => tA === 1 ? 0 : 1);
-     }
-     static bitwiseAnd(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA & tB);
-     }
-     static bitwiseOr(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA | tB);
-     }
-     static bitwiseXor(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA ^ tB);
-     }
-     static bitwiseNot(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => ~tA);
-     }
-     static bitwiseLeftShift(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA << tB);
-     }
-     static bitwiseRightShift(tA, tB) {
-         return TensorMath.elementWiseAB(tA, tB, (tA, tB) => tA >> tB);
-     }
-     static neg(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => -tA);
-     }
-     static abs(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.abs(tA));
-     }
-     static sign(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.sign(tA));
-     }
-     static sin(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.sin(tA));
-     }
-     static cos(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.cos(tA));
-     }
-     static tan(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.tan(tA));
-     }
-     static asin(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.asin(tA));
-     }
-     static acos(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.acos(tA));
-     }
-     static atan(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.atan(tA));
-     }
-     static sinh(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.sinh(tA));
-     }
-     static cosh(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.cosh(tA));
-     }
-     static asinh(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.asinh(tA));
-     }
-     static acosh(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.acosh(tA));
-     }
-     static atanh(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.atanh(tA));
-     }
-     static sqrt(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.sqrt(tA));
-     }
-     static exp(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.exp(tA));
-     }
-     static log(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.log(tA));
-     }
-     static log2(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.log2(tA));
-     }
-     static log10(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.log10(tA));
-     }
-     static log1p(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.log1p(tA));
-     }
-     static relu(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.max(tA, 0));
-     }
-     static sigmoid(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => 1 / (1 + Math.exp(-tA)));
-     }
-     static tanh(tA) {
-         return TensorMath.elementWiseSelf(tA, (tA) => Math.tanh(tA));
-     }
-     static squeezeAxis(tA, axis) {
-         if (typeof tA === "number")
-             return tA;
-         if (axis === 0) {
-             return tA[0];
-         }
-         else {
-             return tA.map(slice => TensorMath.squeezeAxis(slice, axis - 1));
-         }
-     }
-     static squeeze(tA, dims) {
-         if (typeof tA === "number")
-             return tA;
-         if (typeof dims === "number") {
-             dims = [dims];
-         }
-         if (typeof dims === "undefined") {
-             const shape = TensorMath.getShape(tA);
-             dims = [];
-             for (let index = 0; index < shape.length; index++) {
-                 if (shape[index] === 1) {
-                     dims.push(index);
-                 }
-             }
-         }
-         dims = [...dims].sort((a, b) => b - a);
-         let out = tA;
-         for (const axis of dims) {
-             out = TensorMath.squeezeAxis(out, axis);
-         }
-         return out;
-     }
-     static sumAxis(tA, axis) {
-         if (typeof tA === "number")
-             return tA;
-         if (axis === 0) {
-             let result = tA[0];
-             for (let i = 1; i < tA.length; i++) {
-                 result = TensorMath.add(result, tA[i]);
-             }
-             return [result];
-         }
-         else {
-             return tA.map(slice => TensorMath.sumAxis(slice, axis - 1));
-         }
-     }
-     static sum(tA, dims, keepDims = false) {
-         if (typeof tA === "number")
-             return tA;
-         if (typeof dims === "number") {
-             dims = [dims];
-         }
-         if (typeof dims === "undefined") {
-             dims = Array.from({ length: TensorMath.getShape(tA).length }, (_, index) => index);
-         }
-         dims = [...dims].sort((a, b) => b - a);
-         let out = tA;
-         for (const axis of dims) {
-             out = TensorMath.sumAxis(out, axis);
-         }
-         return keepDims ? out : TensorMath.squeeze(out, dims);
-     }
-     static t(tA) {
-         const shapeA = TensorMath.getShape(tA);
-         if (shapeA.length !== 2)
-             throw new Error("Input is not a matrix");
-         const matA = tA;
-         const matARows = matA.length;
-         const matACols = matA[0].length;
-         const matATranspose = Array.from({ length: matACols }, () => new Array(matARows).fill(0));
-         for (let i = 0; i < matARows; i++) {
-             for (let j = 0; j < matACols; j++) {
-                 matATranspose[j][i] = matA[i][j];
-             }
-         }
-         return matATranspose;
-     }
-     static dot(tA, tB) {
-         const shapeA = TensorMath.getShape(tA);
-         const shapeB = TensorMath.getShape(tB);
-         if (shapeA.length !== 1 || shapeB.length !== 1 || shapeA[0] !== shapeB[0])
-             throw new Error("Inputs are not 1D tensors");
-         const vectLen = shapeA[0];
-         const vectA = tA;
-         const vectB = tB;
-         let sum = 0;
-         for (let index = 0; index < vectLen; index++) {
-             sum += vectA[index] * vectB[index];
-         }
-         return sum;
-     }
-     static mm(tA, tB) {
-         const shapeA = TensorMath.getShape(tA);
-         const shapeB = TensorMath.getShape(tB);
-         if (shapeA.length !== 2 || shapeB.length !== 2)
-             throw new Error("Inputs are not matrices");
-         const matA = tA;
-         const matB = tB;
-         const matARows = matA.length;
-         const matACols = matA[0].length;
-         const matBRows = matB.length;
-         const matBCols = matB[0].length;
-         if (matACols !== matBRows)
-             throw new Error("Invalid matrices shape for multiplication");
-         const matC = Array.from({ length: matARows }, () => new Array(matBCols).fill(0));
-         for (let i = 0; i < matARows; i++) {
-             for (let j = 0; j < matBCols; j++) {
-                 for (let k = 0; k < matACols; k++) {
-                     matC[i][j] += matA[i][k] * matB[k][j];
-                 }
-             }
-         }
-         return matC;
-     }
-     static mv(tA, tB) {
-         const shapeA = TensorMath.getShape(tA);
-         const shapeB = TensorMath.getShape(tB);
-         if (shapeA.length !== 2 || shapeB.length !== 1)
-             throw new Error("Input is not a 2D and 1D tensor pair");
-         const matA = tA;
-         const matB = tB.map(el => [el]); // Turn the 1D tensor into a nx1 matrix (vector)
-         return TensorMath.mm(matA, matB).map(el => el[0]);
-     }
-     static matmul(tA, tB) {
-         const shapeA = TensorMath.getShape(tA);
-         const shapeB = TensorMath.getShape(tB);
-         if (shapeA.length === 1 && shapeB.length === 1) {
-             return TensorMath.dot(tA, tB);
-         }
-         else if (shapeA.length === 1 && shapeB.length === 2) {
-             return TensorMath.mm([tA], tB)[0];
-         }
-         else if (shapeA.length === 2 && shapeB.length === 1) {
-             return TensorMath.mv(tA, tB);
-         }
-         else if (shapeA.length === 2 && shapeB.length === 2) {
-             return TensorMath.mm(tA, tB);
-         }
-         // Batched matmul will come when general nD transpose is done
-         throw new Error(`Shapes [${shapeA}] and [${shapeB}] are not supported`);
-     }
- }
- exports.TensorMath = TensorMath;
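For reference, the removed reduction helpers work axis by axis: sum collects the requested dims, reduces each one with sumAxis (which leaves a length-1 axis in place), and then squeezes those axes away unless keepDims is set. A small sketch of that behavior (values chosen purely for illustration, using the same assumed import as in the earlier sketch):

    const m: Tensor = [[1, 2], [3, 4]];

    TensorMath.sum(m, 0, true); // [[4, 6]]  reduced axis kept as length 1
    TensorMath.sum(m, 0);       // [4, 6]    reduced axis squeezed away
    TensorMath.sum(m);          // 10        all axes reduced to a scalar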