mini-jstorch 1.1.9 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -1
- package/engine/MainEngine.js +185 -124
- package/package.json +1 -1
package/README.md
CHANGED

@@ -1,6 +1,8 @@
 ## Mini-JSTorch ##
 
-
+## SORRY GUYS FOR THE DISTURB BECAUSE VERSION 1.1.9 I FORGOT PLACE THE FULL SYSTEM FOR THE UPDATE NOT PLACED ON MAINENGINE BUT AT EXPERIMENTS FILES
+
+# IMPORTANT!
 
 This Module will not gonna Run *Well* In at GPU or a Backend.
 We're will gonna *Optimize* So all User can still use this Module.
@@ -128,3 +130,4 @@ inputs.forEach(inp => {
 - **This module is implemented entirely in pure JavaScript.**
 - **The `Dummy` folder contains modules used for development, testing, and debugging before integration into the main engine.**
 - **This module was created by a `single` developer.**
+- **Date 23 Would be `CRAZY`.**
package/engine/MainEngine.js
CHANGED

@@ -1,148 +1,209 @@
-// ================================
-// MINI JS AI ENGINE v1
-// ================================
 
-//
-
-
+// MAINENGINE FILES [PACK IN AT ONE FILES]
+// CURRENT VERSIONS: 0.0.4
+// AUTHOR: Rizal
+// LICENSE: MIT(R)
 
-
-
-
-
-const transpose = m => m[0].map((_,i)=>m.map(row=>row[i]));
+// Utils
+export function zeros(rows, cols) {
+  return Array.from({length: rows}, () => Array(cols).fill(0));
+}
 
-
-
-
-
-
-};
-const dActivations = {
-  relu: x => x.map(v=>v>0?1:0),
-  linear: x => x.map(_=>1),
-  leakyRelu: (x, alpha=0.01) => x.map(v=>v>0?1:alpha)
-};
-
-// Dense layer with manual grad & auto-grad
-class Dense {
-  constructor(inputSize, outputSize, activation='linear'){
-    this.inputSize = inputSize;
-    this.outputSize = outputSize;
-    this.activation = activation;
-    this.W = Array.from({length:inputSize},()=>Array.from({length:outputSize},()=>randn()*Math.sqrt(2/inputSize)));
-    this.b = Array(outputSize).fill(0);
-
-    // Adam variables
-    this.mW = mulScalar(this.W,0);
-    this.vW = mulScalar(this.W,0);
-    this.mb = Array(outputSize).fill(0);
-    this.vb = Array(outputSize).fill(0);
-    this.lastInput = null;
-    this.lastOutput = null;
-  }
-
-  forward(X){
-    this.lastInput = X;
-    let output = dot(X,this.W);
-    output = output.map((row,i)=>row.map((v,j)=>v+this.b[j]));
-    this.lastOutput = output.map(row => Activations[this.activation](row));
-    return this.lastOutput;
-  }
-
-  backward(dLoss, lr=0.001, beta1=0.9, beta2=0.999, t=1){
-    const flatOut = this.lastOutput.flat();
-    const actGrad = dActivations[this.activation](flatOut);
-    const dOut = dLoss.flat().map((v,i)=>v*actGrad[i]);
-
-    const gradW = Array.from({length:this.inputSize},()=>Array(this.outputSize).fill(0));
-    const gradB = Array(this.outputSize).fill(0);
-
-    for(let k=0;k<this.lastInput.length;k++){
-      for(let i=0;i<this.inputSize;i++){
-        for(let j=0;j<this.outputSize;j++){
-          gradW[i][j] += this.lastInput[k][i]*dOut[j]/this.lastInput.length;
-        }
-      }
-    }
-    for(let j=0;j<this.outputSize;j++) gradB[j] = dOut[j]/this.lastInput.length;
-
-    // Adam update with bias correction
-    for(let i=0;i<this.inputSize;i++){
-      for(let j=0;j<this.outputSize;j++){
-        this.mW[i][j] = beta1*this.mW[i][j]+(1-beta1)*gradW[i][j];
-        this.vW[i][j] = beta2*this.vW[i][j]+(1-beta2)*gradW[i][j]*gradW[i][j];
-        const mHat = this.mW[i][j]/(1-Math.pow(beta1,t));
-        const vHat = this.vW[i][j]/(1-Math.pow(beta2,t));
-        this.W[i][j] -= lr*mHat/(Math.sqrt(vHat)+1e-8);
-      }
-    }
+export function randomMatrix(rows, cols, scale=0.1) {
+  return Array.from({length: rows}, () =>
+    Array.from({length: cols}, () => (Math.random()*2-1)*scale)
+  );
+}
 
-
-
-
-
-
-
-  }
+export function softmax(x) {
+  const maxVal = Math.max(...x);
+  const exps = x.map(v => Math.exp(v - maxVal));
+  const sumExps = exps.reduce((a,b)=>a+b, 0);
+  return exps.map(v => v / sumExps);
+}
 
-
-
-
-
-
-
-
-
+export function crossEntropy(pred, target) {
+  const eps = 1e-12;
+  return -target.reduce((sum, t, i) => sum + t * Math.log(pred[i] + eps), 0);
+}
+
+export function addMatrices(a, b) {
+  return a.map((row, i) =>
+    row.map((v, j) => v + (b[i] && b[i][j] !== undefined ? b[i][j] : 0))
+  );
+}
+
+export function dot(a, b) {
+  const result = zeros(a.length, b[0].length);
+  for (let i=0;i<a.length;i++) {
+    for (let j=0;j<b[0].length;j++) {
+      let sum=0;
+      for (let k=0;k<a[0].length;k++) sum += a[i][k]*b[k][j];
+      result[i][j]=sum;
     }
-    return dNext;
   }
+  return result;
}
 
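Before the diff continues into the layer classes, here is a quick sketch of how these new utilities compose. The snippet is illustrative, not part of the diff, and the deep import path is an assumption; adjust it to however you actually load `MainEngine.js`.

```js
// Hypothetical deep import; the package may expose a different entry point.
import { softmax, crossEntropy, dot } from "mini-jstorch/engine/MainEngine.js";

// softmax() normalizes one logit vector (a plain array) into probabilities.
const probs = softmax([2.0, 1.0, 0.1]);       // ≈ [0.659, 0.242, 0.099]

// crossEntropy() scores probabilities against a one-hot target.
const loss = crossEntropy(probs, [1, 0, 0]);  // -log(0.659) ≈ 0.417

// dot() is a plain triple-loop matmul over nested arrays.
const y = dot([[1, 2]], [[3], [4]]);          // [[11]]
```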
-//
-class
-constructor(
-this.
-this.
+// Layers
+export class Linear {
+  constructor(inputDim, outputDim) {
+    this.W = randomMatrix(inputDim, outputDim);
+    this.b = Array(outputDim).fill(0);
+    this.gradW = zeros(inputDim, outputDim);
+    this.gradb = Array(outputDim).fill(0);
+    this.x = null;
   }
 
-forward(
-
+  forward(x) {
+    this.x = x;
+    const out = dot(x, this.W);
+    return out.map((row,i) => row.map((v,j)=>v+this.b[j]));
   }
 
-
-
-
-
+  backward(grad) {
+    // grad shape: batch x outputDim
+    for (let i=0;i<this.W.length;i++)
+      for (let j=0;j<this.W[0].length;j++)
+        this.gradW[i][j] = this.x.reduce((sum,row,k)=>sum+row[i]*grad[k][j],0);
+
+    for (let j=0;j<this.b.length;j++)
+      this.gradb[j] = grad.reduce((sum,row)=>sum+row[j],0);
+
+    // propagate to input
+    const gradInput = zeros(this.x.length, this.W.length);
+    for (let i=0;i<this.x.length;i++)
+      for (let j=0;j<this.W.length;j++)
+        for (let k=0;k<this.W[0].length;k++)
+          gradInput[i][j]+=grad[i][k]*this.W[j][k];
+    return gradInput;
+  }
 
-
-
-
-}
+  parameters() {
+    return [
+      {param: this.W, grad: this.gradW},
+      {param: [this.b], grad: [this.gradb]} // wrap b in array for consistency
+    ];
+  }
+}
 
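The diff itself doesn't spell out `Linear`'s shape contract, so to make it explicit: a batch is an array of row arrays, `forward` maps `batch × inputDim` to `batch × outputDim`, and `backward` takes the upstream gradient in the output shape, fills `gradW`/`gradb` as a side effect, and returns the gradient in the input shape. A hypothetical illustration (import path assumed as before):

```js
import { Linear } from "mini-jstorch/engine/MainEngine.js";

const layer = new Linear(2, 3);

const x = [[1, 2], [3, 4]];              // batch of 2, inputDim 2
const y = layer.forward(x);              // 2 x 3

const upstream = [[1, 0, 0], [0, 1, 0]]; // must match y's shape
const gradX = layer.backward(upstream);  // 2 x 2, fed to the previous layer
// Side effect: layer.gradW (2 x 3) and layer.gradb (length 3) are now filled.
```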
-
-
-
+// Activations
+export class ReLU {
+  constructor() { this.out=null; }
+  forward(x) {
+    this.out = Array.isArray(x[0]) ? x.map(r=>r.map(v=>Math.max(0,v))) : x.map(v=>Math.max(0,v));
+    return this.out;
+  }
+  backward(grad) {
+    return Array.isArray(grad[0])
+      ? grad.map((r,i)=>r.map((v,j)=>v*(this.out[i][j]>0?1:0)))
+      : grad.map((v,i)=>v*(this.out[i]>0?1:0));
+  }
+}
+
+export class Sigmoid {
+  constructor() { this.out=null; }
+  forward(x) {
+    const sigmoidFn = v=>1/(1+Math.exp(-v));
+    this.out = Array.isArray(x[0]) ? x.map(r=>r.map(sigmoidFn)) : x.map(sigmoidFn);
+    return this.out;
+  }
+  backward(grad) {
+    return Array.isArray(grad[0])
+      ? grad.map((r,i)=>r.map((v,j)=>v*this.out[i][j]*(1-this.out[i][j])))
+      : grad.map((v,i)=>v*this.out[i]*(1-this.out[i]));
+  }
+}
+
+// Loss wrapper
+export class CrossEntropyLoss {
+  forward(pred, target) {
+    // pred: batch x classes, target: batch x classes
+    const losses = pred.map((p,i)=>crossEntropy(softmax(p), target[i]));
+    this.pred = pred;
+    this.target = target;
+    return losses.reduce((a,b)=>a+b,0)/pred.length;
   }
 
-
-
+  backward() {
+    // gradient of softmax + CE
+    const grad = [];
+    for (let i=0;i<this.pred.length;i++) {
+      const s = softmax(this.pred[i]);
+      grad.push(s.map((v,j)=>v-this.target[i][j]));
+    }
+    return grad.map(r=>r.map(v=>v/this.pred.length));
   }
 }
 
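The `backward` above relies on the standard softmax-plus-cross-entropy identity: the gradient of the loss with respect to the raw logits collapses to `softmax(logits) - target`, averaged over the batch. A tiny check, again using the hypothetical import path from earlier:

```js
import { CrossEntropyLoss } from "mini-jstorch/engine/MainEngine.js";

const ce = new CrossEntropyLoss();
const logits = [[2.0, 1.0, 0.1]];        // raw scores, batch of 1
const target = [[1, 0, 0]];

const loss = ce.forward(logits, target); // ≈ 0.417
const grad = ce.backward();              // ≈ [[-0.341, 0.242, 0.099]]
// Because the loss applies softmax itself, the network should end in a
// plain Linear layer (raw logits), not a softmax-like activation.
```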
-//
-
-
+// Optimizer: Adam
+export class Adam {
+  constructor(params, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8) {
+    this.params = params;
+    this.lr = lr;
+    this.beta1 = beta1;
+    this.beta2 = beta2;
+    this.eps = eps;
+    this.m = params.map(p=>zeros(p.param.length, p.param[0].length || 1));
+    this.v = params.map(p=>zeros(p.param.length, p.param[0].length || 1));
+    this.t=0;
+  }
 
-
-
-
-
-]);
+  step() {
+    this.t++;
+    this.params.forEach((p,idx)=>{
+      for (let i=0;i<p.param.length;i++){
+        for (let j=0;j<(p.param[0].length||1);j++){
+          const g = p.grad[i][j];
+          this.m[idx][i][j] = this.beta1*this.m[idx][i][j] + (1-this.beta1)*g;
+          this.v[idx][i][j] = this.beta2*this.v[idx][i][j] + (1-this.beta2)*g*g;
+          const mHat = this.m[idx][i][j]/(1-Math.pow(this.beta1,this.t));
+          const vHat = this.v[idx][i][j]/(1-Math.pow(this.beta2,this.t));
+          p.param[i][j]-= this.lr*mHat/(Math.sqrt(vHat)+this.eps);
+        }
+      }
+    });
+  }
+}
 
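The optimizer consumes the `{param, grad}` pairs produced by `parameters()` and updates them in place, so it only needs to be constructed once per model. A minimal wiring sketch (illustrative only; import path assumed as before):

```js
import { Linear, Adam } from "mini-jstorch/engine/MainEngine.js";

const layer = new Linear(2, 3);
const opt = new Adam(layer.parameters(), 0.01); // lr = 0.01

// ... after each forward/backward pass has filled layer.gradW / layer.gradb:
opt.step(); // bias-corrected Adam update, applied in place to W and b
```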
-
-
+// Sequential container
+export class Sequential {
+  constructor(layers=[]) { this.layers=layers; }
+  forward(x) {
+    return this.layers.reduce((input, layer)=>layer.forward(input), x);
+  }
+  backward(grad) {
+    return this.layers.reduceRight((g,layer)=>layer.backward(g), grad);
+  }
+  parameters() {
+    return this.layers.flatMap(l=>l.parameters ? l.parameters() : []);
+  }
+}
 
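`Sequential` simply folds `forward` left-to-right over the layer list and `backward` right-to-left, so a model is just an ordinary array of layers, for example (hypothetical, not from the diff):

```js
import { Sequential, Linear, ReLU } from "mini-jstorch/engine/MainEngine.js";

const model = new Sequential([
  new Linear(2, 8),
  new ReLU(),
  new Linear(8, 2), // raw logits out; CrossEntropyLoss applies softmax itself
]);
```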
-
+// Add to the end of the main mini-jstorch file
+export function saveModel(model) {
+  if (!(model instanceof Sequential)) {
+    throw new Error("saveModel only supports Sequential models");
+  }
+  const weights = model.layers.map(layer => ({
+    weights: layer.W ? layer.W : null,
+    biases: layer.b ? layer.b : null
+  }));
+  return JSON.stringify(weights);
+}
+
+export function loadModel(model, json) {
+  if (!(model instanceof Sequential)) {
+    throw new Error("loadModel only supports Sequential models");
+  }
+  const weights = JSON.parse(json);
+  model.layers.forEach((layer, i) => {
+    if (layer.W && weights[i].weights) {
+      layer.W = weights[i].weights;
+    }
+    if (layer.b && weights[i].biases) {
+      layer.b = weights[i].biases;
+    }
+  });
+}
 
-console.log('Prediction [7,8]:', model.predict([[7,8]]));
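Taken together, the new 1.2.0 surface supports a compact train/save/load loop. The following end-to-end sketch is mine, not part of the diff; the import path is assumed as above, and the XOR-style batch and hyperparameters are arbitrary:

```js
import {
  Sequential, Linear, ReLU, CrossEntropyLoss, Adam, saveModel, loadModel,
} from "mini-jstorch/engine/MainEngine.js";

const model = new Sequential([new Linear(2, 8), new ReLU(), new Linear(8, 2)]);
const loss = new CrossEntropyLoss();
const opt = new Adam(model.parameters(), 0.01);

const X = [[0, 0], [0, 1], [1, 0], [1, 1]]; // batch x 2
const Y = [[1, 0], [0, 1], [0, 1], [1, 0]]; // one-hot XOR labels

for (let epoch = 0; epoch < 200; epoch++) {
  const logits = model.forward(X);       // batch x 2 raw logits
  const l = loss.forward(logits, Y);     // mean softmax cross-entropy
  model.backward(loss.backward());       // fills every layer's gradW/gradb
  opt.step();                            // in-place Adam update
  if (epoch % 50 === 0) console.log("epoch", epoch, "loss", l);
}

const json = saveModel(model);           // W and b arrays as a JSON string
loadModel(model, json);                  // restore into a same-shape model
```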
package/package.json
CHANGED