mini-jstorch 1.4.4 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -6,30 +6,21 @@ A lightweight JavaScript neural network library for rapid frontend AI experiment
  
  Mini-JSTorch is a high-performance, minimalist JavaScript library for building neural networks. It runs efficiently in both frontend and backend environments, including low-end devices. The library enables quick experimentation and learning in AI without compromising stability, accuracy, or training reliability.
  
- This release, **version 1.4.4**, We make `Matrix Utils` now can be used in others Files.
+ This release, **version 1.5.0**, adds user-friendly tensor functions, a Flatten layer, and improved Conv2D operations,
+ and modifies some classes for architecture compatibility.
  
  ---
  
- ## Feature Highlights
- 
- - **Learning Rate Schedulers:** New `StepLR` and `LambdaLR` for dynamic optimizer learning rate adjustment.
- - **Full Conv2D support:** Forward and backward operations for convolutional layers.
- - **Tensor operations:** Broadcasting, reshaping, and reduction utilities.
- - **Advanced Activations:** Includes `LeakyReLU`, `GELU`, `Mish`, `SiLU`, `ELU`, and more.
- - **Optimizers:** `Adam` and `SGD` with gradient updates.
- - **Dropout Layer:** For regularization during training.
- - **BatchNorm2D:** For stable training in convolutional models.
- - **Tensor Manipulation:** Utilities like `flatten`, `stack`, `concat`, `eye`, `reshape`.
- - **Model Save & Load:** Easy persistence and restore of models.
- - **Test/Demo Templates:** The `tests/` folder provides ready-to-run examples for model building and feature usage.
- - **Performance Optimized:** Suitable for both frontend and backend usage.
- - **Backward Compatibility:** Maintained for core layers and activations.
+ ## New Feature Highlights
+ 
+ - **User-Friendly Tensor API:** New `fu_` functions (`fu_tensor`, `fu_add`, `fu_matmul`, etc.) with automatic validation and shape checking.
+ - **Flatten Layer:** Essential for connecting CNN architectures to dense layers.
  
  ---
  
  ## Core Features
  
- - **Layers:** Linear, Conv2D
+ - **Layers:** Linear, Flatten, Conv2D
  - **Activations:** ReLU, Sigmoid, Tanh, LeakyReLU, GELU, Mish, SiLU, ELU
  - **Loss Functions:** MSELoss, CrossEntropyLoss
  - **Optimizers:** Adam, SGD
@@ -52,7 +43,7 @@ npm install mini-jstorch
  ## Quick Start Example
  
  ```javascript
- import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam, StepLR } from 'mini-jstorch';
+ import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam, StepLR } from './jstorch.js';
  
  // Build model
  const model = new Sequential([
@@ -99,7 +90,7 @@ predTest.forEach((p,i) => {
  ## Save & Load Models
  
  ```javascript
- import { saveModel, loadModel, Sequential } from './src/MainEngine.js';
+ import { saveModel, loadModel, Sequential } from './jstorch.js';
  
  const json = saveModel(model);
  const model2 = new Sequential([...]); // same architecture
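The `fu_` helpers named in the highlights are defined later in this diff; as a quick orientation, here is a minimal sketch of how they compose (import path assumed from the README's own examples above):

```javascript
// Minimal sketch of the new fu_ API, assuming the exports shown in this diff.
import { fu_tensor, fu_matmul, fu_softmax } from './jstorch.js';

const x = fu_tensor([[1, 2]]);                     // 1x2
const w = fu_tensor([[0.5, -0.5], [0.25, 0.75]]);  // 2x2
const probs = fu_softmax(fu_matmul(x, w));         // 1x2, each row sums to 1
console.log(probs.data);                           // [[0.5, 0.5]]
```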
package/index.js CHANGED
@@ -1,2 +1,2 @@
  // package root
- export * from "./src/MainEngine.js";
+ export * from "./src/jstorch.js";
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "mini-jstorch",
-   "version": "1.4.4",
+   "version": "1.5.0",
    "type": "module",
    "description": "A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices Inspired by PyTorch.",
    "main": "index.js",
@@ -70,8 +70,160 @@ export function crossEntropy(pred,target){
    const eps=1e-12;
    return -target.reduce((sum,t,i)=>sum+t*Math.log(pred[i]+eps),0);
  }
- /* Not Added more Utils yet. (this patch not MINOR)
- Just make it Public to other files. */
+ 
+ // ---------------------- USER-FRIENDLY UTILS ----------------------
+ export function fu_tensor(data, requiresGrad = false) {
+   if (!Array.isArray(data) || !Array.isArray(data[0])) {
+     throw new Error("fu_tensor: Data must be 2D array");
+   }
+   const tensor = new Tensor(data);
+   tensor.requiresGrad = requiresGrad;
+   return tensor;
+ }
+ 
+ // fu_add
+ export function fu_add(a, b) {
+   if (!(a instanceof Tensor) && !(b instanceof Tensor)) {
+     throw new Error("fu_add: At least one operand must be Tensor");
+   }
+ 
+   if (!(a instanceof Tensor)) {
+     a = fu_tensor(Array(b.shape()[0]).fill().map(() =>
+       Array(b.shape()[1]).fill(a)
+     ));
+   }
+ 
+   if (!(b instanceof Tensor)) {
+     b = fu_tensor(Array(a.shape()[0]).fill().map(() =>
+       Array(a.shape()[1]).fill(b)
+     ));
+   }
+ 
+   if (a.shape()[0] !== b.shape()[0] || a.shape()[1] !== b.shape()[1]) {
+     throw new Error(`fu_add: Shape mismatch ${a.shape()} vs ${b.shape()}`);
+   }
+ 
+   return new Tensor(a.data.map((r, i) => r.map((v, j) => v + b.data[i][j])));
+ }
+ 
+ // fu_mul
+ export function fu_mul(a, b) {
+   if (!(a instanceof Tensor) && !(b instanceof Tensor)) {
+     throw new Error("fu_mul: At least one operand must be Tensor");
+   }
+ 
+   if (!(a instanceof Tensor)) {
+     a = fu_tensor(Array(b.shape()[0]).fill().map(() =>
+       Array(b.shape()[1]).fill(a)
+     ));
+   }
+ 
+   if (!(b instanceof Tensor)) {
+     b = fu_tensor(Array(a.shape()[0]).fill().map(() =>
+       Array(a.shape()[1]).fill(b)
+     ));
+   }
+ 
+   if (a.shape()[0] !== b.shape()[0] || a.shape()[1] !== b.shape()[1]) {
+     throw new Error(`fu_mul: Shape mismatch ${a.shape()} vs ${b.shape()}`);
+   }
+ 
+   return new Tensor(a.data.map((r, i) => r.map((v, j) => v * b.data[i][j])));
+ }
+ 
+ // fu_matmul
+ export function fu_matmul(a, b) {
+   if (!(a instanceof Tensor)) a = fu_tensor(a);
+   if (!(b instanceof Tensor)) b = fu_tensor(b);
+ 
+   if (a.shape()[1] !== b.shape()[0]) {
+     throw new Error(`fu_matmul: Inner dimension mismatch ${a.shape()[1]} vs ${b.shape()[0]}`);
+   }
+ 
+   return new Tensor(dot(a.data, b.data));
+ }
+ 
+ // fu_sum
+ export function fu_sum(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   const total = tensor.data.flat().reduce((a, b) => a + b, 0);
+   return new Tensor([[total]]);
+ }
+ 
+ // fu_mean
+ export function fu_mean(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   const totalElements = tensor.shape()[0] * tensor.shape()[1];
+   const sum = fu_sum(tensor).data[0][0];
+   return new Tensor([[sum / totalElements]]);
+ }
+ 
+ // fu_relu
+ export function fu_relu(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   return new Tensor(tensor.data.map(r => r.map(v => Math.max(0, v))));
+ }
+ 
+ // fu_sigmoid
+ export function fu_sigmoid(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   const fn = v => 1 / (1 + Math.exp(-v));
+   return new Tensor(tensor.data.map(r => r.map(fn)));
+ }
+ 
+ // fu_tanh
+ export function fu_tanh(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   return new Tensor(tensor.data.map(r => r.map(v => Math.tanh(v))));
+ }
+ 
+ // fu_softmax
+ export function fu_softmax(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   const result = tensor.data.map(row => {
+     const maxVal = Math.max(...row);
+     const exps = row.map(v => Math.exp(v - maxVal));
+     const sumExps = exps.reduce((a, b) => a + b, 0);
+     return exps.map(v => v / sumExps);
+   });
+   return new Tensor(result);
+ }
+ 
+ // fu_flatten - Flatten tensor to 1D
+ export function fu_flatten(tensor) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   return new Tensor([tensor.data.flat()]);
+ }
+ 
+ // fu_reshape
+ export function fu_reshape(tensor, rows, cols) {
+   if (!(tensor instanceof Tensor)) tensor = fu_tensor(tensor);
+   const flat = tensor.data.flat();
+   if (flat.length !== rows * cols) {
+     throw new Error(`fu_reshape: Size mismatch ${flat.length} vs ${rows * cols}`);
+   }
+ 
+   const result = [];
+   for (let i = 0; i < rows; i++) {
+     result.push(flat.slice(i * cols, i * cols + cols));
+   }
+   return new Tensor(result);
+ }
+ 
+ // fu_stack
+ export function fu_stack(tensors) {
+   if (!tensors.every(t => t instanceof Tensor)) {
+     throw new Error("fu_stack: All inputs must be Tensors");
+   }
+ 
+   const firstShape = tensors[0].shape();
+   if (!tensors.every(t => t.shape()[0] === firstShape[0] && t.shape()[1] === firstShape[1])) {
+     throw new Error("fu_stack: All tensors must have same shape");
+   }
+ 
+   const stacked = tensors.map(t => t.data);
+   return new Tensor(stacked);
+ }
  
  // ---------------------- Tensor ----------------------
  export class Tensor {
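A note on the broadcasting behavior added above: when one operand of `fu_add`/`fu_mul` is a plain number, it is expanded to the other operand's 2D shape before the element-wise operation, and mismatched 2D shapes throw. A small sketch, directly replicating the `+` lines above (import path assumed):

```javascript
// Sketch of fu_add's scalar broadcasting and shape checking.
import { fu_tensor, fu_add } from './src/jstorch.js';

const t = fu_tensor([[1, 2], [3, 4]]);
console.log(fu_add(t, 10).data);        // [[11, 12], [13, 14]]

try {
  fu_add(t, fu_tensor([[1, 2, 3]]));    // 2x2 vs 1x3
} catch (err) {
  console.log(err.message);             // "fu_add: Shape mismatch 2,2 vs 1,3"
}
```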
@@ -121,47 +273,163 @@ export class Linear {
    parameters(){ return [ {param:this.W,grad:this.gradW}, {param:[this.b],grad:[this.gradb]} ]; }
  }
  
+ export class Flatten {
+   constructor() {
+     this.originalShape = null;
+   }
+ 
+   forward(x) {
+     // Always convert to [batch, features] format
+     this.originalShape = x.map(sample => this._getShape(sample));
+ 
+     return x.map(sample => {
+       const flat = this._flatten(sample);
+       return flat; // Return as 1D array for [batch, features] compatibility
+     });
+   }
+ 
+   backward(grad) {
+     // grad is [batch, features], reshape back to original shape
+     return grad.map((flatGrad, batchIdx) => {
+       const shape = this.originalShape[batchIdx];
+       return this._unflatten(flatGrad, shape);
+     });
+   }
+ 
+   _getShape(sample) {
+     if (Array.isArray(sample[0]) && Array.isArray(sample[0][0])) {
+       return {
+         type: '3d',
+         dims: [sample.length, sample[0].length, sample[0][0].length]
+       };
+     } else if (Array.isArray(sample[0])) {
+       return {
+         type: '2d',
+         dims: [sample.length, sample[0].length]
+       };
+     } else {
+       return {
+         type: '1d',
+         dims: [sample.length]
+       };
+     }
+   }
+ 
+   _flatten(sample) {
+     if (Array.isArray(sample[0]) && Array.isArray(sample[0][0])) {
+       return sample.flat(2); // [channels, height, width] -> flat
+     } else if (Array.isArray(sample[0])) {
+       return sample.flat(); // [height, width] -> flat
+     } else {
+       return sample; // already flat
+     }
+   }
+ 
+   _unflatten(flat, shape) {
+     if (shape.type === '3d') {
+       const [channels, height, width] = shape.dims;
+       const result = [];
+       let index = 0;
+       for (let c = 0; c < channels; c++) {
+         const channel = [];
+         for (let h = 0; h < height; h++) {
+           const row = [];
+           for (let w = 0; w < width; w++) {
+             row.push(flat[index++]);
+           }
+           channel.push(row);
+         }
+         result.push(channel);
+       }
+       return result;
+     } else if (shape.type === '2d') {
+       const [height, width] = shape.dims;
+       const result = [];
+       for (let h = 0; h < height; h++) {
+         result.push(flat.slice(h * width, h * width + width));
+       }
+       return result;
+     } else {
+       return flat; // 1d
+     }
+   }
+ 
+   parameters() { return []; }
+ }
+ 
  // ---------------------- Conv2D ----------------------
  export class Conv2D {
- constructor(inC,outC,kernel,stride=1,padding=0){
- this.inC=inC; this.outC=outC; this.kernel=kernel;
- this.stride=stride; this.padding=padding;
- this.W=Array(outC).fill(0).map(()=>Array(inC).fill(0).map(()=>randomMatrix(kernel,kernel)));
- this.gradW=Array(outC).fill(0).map(()=>Array(inC).fill(0).map(()=>zeros(kernel,kernel)));
- this.x=null;
+   constructor(inC, outC, kernel, stride=1, padding=0){
+     this.inC = inC;
+     this.outC = outC;
+     this.kernel = kernel;
+     this.stride = stride;
+     this.padding = padding;
+     this.W = Array(outC).fill().map(() =>
+       Array(inC).fill().map(() => randomMatrix(kernel, kernel))
+     );
+     this.gradW = Array(outC).fill().map(() =>
+       Array(inC).fill().map(() => zeros(kernel, kernel))
+     );
+     this.x = null;
    }
  
- pad2D(input,pad){
- return input.map(channel=>{
- const rows=channel.length+2*pad;
- const cols=channel[0].length+2*pad;
- const out=Array.from({length:rows},()=>Array(cols).fill(0));
- for(let i=0;i<channel.length;i++) for(let j=0;j<channel[0].length;j++) out[i+pad][j+pad]=channel[i][j];
- return out;
- });
+   pad2D(input, pad){
+     // Input is single channel [height, width]
+     if (!input || !input.length) return input;
+ 
+     const rows = input.length + 2 * pad;
+     const cols = input[0].length + 2 * pad;
+     const out = Array.from({length: rows}, () => Array(cols).fill(0));
+ 
+     for(let i = 0; i < input.length; i++) {
+       for(let j = 0; j < input[0].length; j++) {
+         out[i + pad][j + pad] = input[i][j];
+       }
+     }
+     return out;
    }
  
- conv2DSingle(input,kernel){
- const rows=input.length-kernel.length+1;
- const cols=input[0].length-kernel[0].length+1;
- const out=zeros(rows,cols);
- for(let i=0;i<rows;i++) for(let j=0;j<cols;j++)
- for(let ki=0;ki<kernel.length;ki++) for(let kj=0;kj<kernel[0].length;kj++)
- out[i][j]+=input[i+ki][j+kj]*kernel[ki][kj];
+   conv2DSingle(input, kernel) {
+     const rows = Math.floor((input.length - kernel.length) / this.stride) + 1;
+     const cols = Math.floor((input[0].length - kernel[0].length) / this.stride) + 1;
+     const out = zeros(rows, cols);
+ 
+     for(let i = 0; i < rows; i++) {
+       for(let j = 0; j < cols; j++) {
+         let sum = 0;
+         for(let ki = 0; ki < kernel.length; ki++) {
+           for(let kj = 0; kj < kernel[0].length; kj++) {
+             const inputRow = i * this.stride + ki;
+             const inputCol = j * this.stride + kj;
+             sum += input[inputRow][inputCol] * kernel[ki][kj];
+           }
+         }
+         out[i][j] = sum;
+       }
+     }
      return out;
    }
  
- forward(batch){
- this.x=batch;
- return batch.map(sample=>{
- const channelsOut=[];
- for(let oc=0;oc<this.outC;oc++){
- let outChan=zeros(sample[0].length,sample[0][0].length);
- for(let ic=0;ic<this.inC;ic++){
- let inputChan=sample[ic];
- if(this.padding>0) inputChan=this.pad2D([inputChan],this.padding)[0];
- const conv=this.conv2DSingle(inputChan,this.W[oc][ic]);
- outChan=addMatrices(outChan,conv);
+   forward(batch) {
+     this.x = batch;
+     return batch.map(sample => {
+       const channelsOut = [];
+       for(let oc = 0; oc < this.outC; oc++) {
+         let outChan = null;
+         for(let ic = 0; ic < this.inC; ic++) {
+           let inputChan = sample[ic];
+           if(this.padding > 0) {
+             inputChan = this.pad2D(inputChan, this.padding);
+           }
+ 
+           const conv = this.conv2DSingle(inputChan, this.W[oc][ic]);
+ 
+           if(outChan === null) {
+             outChan = conv;
+           } else {
+             outChan = addMatrices(outChan, conv);
+           }
          }
          channelsOut.push(outChan);
        }
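The new `Flatten` exists precisely to bridge `Conv2D`'s `[batch, channels, height, width]` output to `Linear`'s `[batch, features]` input. A minimal sketch, with dimensions assumed for illustration (import path assumed):

```javascript
// Sketch: a 3x3 kernel with stride 1 and padding 1 keeps an 8x8 feature map
// at 8x8, so 4 output channels flatten to 4 * 8 * 8 = 256 features.
import { Sequential, Conv2D, Flatten, Linear } from './src/jstorch.js';

const model = new Sequential([
  new Conv2D(1, 4, 3, 1, 1), // inC=1, outC=4, kernel=3, stride=1, padding=1
  new Flatten(),
  new Linear(256, 10),
]);
```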
@@ -170,46 +438,45 @@ export class Conv2D {
    }
  
    backward(grad) {
- const batchSize = this.x.length;
- const gradInput = this.x.map(sample => sample.map(chan => zeros(chan.length, chan[0].length)));
- const gradW = this.W.map(oc => oc.map(ic => zeros(this.kernel,this.kernel)));
- 
- for (let b = 0; b < batchSize; b++) {
- const xPadded = this.pad2D(this.x[b], this.padding);
- const gradInputPadded = xPadded.map(chan => zeros(chan.length, chan[0].length));
- 
- for (let oc = 0; oc < this.outC; oc++) {
- for (let ic = 0; ic < this.inC; ic++) {
- const outGrad = grad[b][oc];
- const inChan = xPadded[ic];
- 
- // Compute gradW
- for (let i = 0; i < this.kernel; i++) {
- for (let j = 0; j < this.kernel; j++) {
- let sum = 0;
- for (let y = 0; y < outGrad.length; y++) {
- for (let x = 0; x < outGrad[0].length; x++) {
- const inY = y * this.stride + i;
- const inX = x * this.stride + j;
- if (inY < inChan.length && inX < inChan[0].length) {
- sum += inChan[inY][inX] * outGrad[y][x];
+     const batchSize = this.x.length;
+     const gradW = this.W.map(oc => oc.map(ic => zeros(this.kernel, this.kernel)));
+     const gradInput = this.x.map(sample =>
+       sample.map(chan => zeros(chan.length, chan[0].length))
+     );
+ 
+     for (let b = 0; b < batchSize; b++) {
+       for (let oc = 0; oc < this.outC; oc++) {
+         for (let ic = 0; ic < this.inC; ic++) {
+           const outGrad = grad[b][oc];
+ 
+           // Compute gradW
+           for (let i = 0; i < this.kernel; i++) {
+             for (let j = 0; j < this.kernel; j++) {
+               let sum = 0;
+               for (let y = 0; y < outGrad.length; y++) {
+                 for (let x = 0; x < outGrad[0].length; x++) {
+                   const inY = y * this.stride + i;
+                   const inX = x * this.stride + j;
+                   if (inY < this.x[b][ic].length && inX < this.x[b][ic][0].length) {
+                     sum += this.x[b][ic][inY][inX] * outGrad[y][x];
+                   }
                  }
                }
+               gradW[oc][ic][i][j] += sum;
              }
- gradW[oc][ic][i][j] += sum;
            }
- }
  
- // Compute gradInput
- const flippedKernel = this.W[oc][ic].map(row => [...row].reverse()).reverse();
- for (let y = 0; y < outGrad.length; y++) {
- for (let x = 0; x < outGrad[0].length; x++) {
- for (let i = 0; i < this.kernel; i++) {
- for (let j = 0; j < this.kernel; j++) {
- const inY = y * this.stride + i;
- const inX = x * this.stride + j;
- if (inY < gradInputPadded[ic].length && inX < gradInputPadded[ic][0].length) {
- gradInputPadded[ic][inY][inX] += flippedKernel[i][j] * outGrad[y][x];
+           // Compute gradInput
+           for (let y = 0; y < outGrad.length; y++) {
+             for (let x = 0; x < outGrad[0].length; x++) {
+               for (let ki = 0; ki < this.kernel; ki++) {
+                 for (let kj = 0; kj < this.kernel; kj++) {
+                   const inY = y * this.stride + ki;
+                   const inX = x * this.stride + kj;
+                   if (inY < gradInput[b][ic].length && inX < gradInput[b][ic][0].length) {
+                     gradInput[b][ic][inY][inX] +=
+                       this.W[oc][ic][ki][kj] * outGrad[y][x];
+                   }
                  }
                }
              }
@@ -218,24 +485,18 @@ export class Conv2D {
          }
        }
  
- // Remove padding from gradInput
- if (this.padding > 0) {
- for (let ic = 0; ic < this.inC; ic++) {
- const padded = gradInputPadded[ic];
- const cropped = padded.slice(this.padding, padded.length - this.padding)
- .map(row => row.slice(this.padding, row.length - this.padding));
- gradInput[b][ic] = cropped;
- }
- } else {
- for (let ic = 0; ic < this.inC; ic++) gradInput[b][ic] = gradInputPadded[ic];
- }
+     this.gradW = gradW;
+     return gradInput;
    }
  
- this.gradW = gradW;
- return gradInput;
- }
- 
- parameters(){ return this.W.flatMap((w,oc)=>w.map((wc,ic)=>({param:wc,grad:this.gradW[oc][ic]}))); }
+   parameters() {
+     return this.W.flatMap((w, oc) =>
+       w.map((wc, ic) => ({
+         param: wc,
+         grad: this.gradW[oc][ic]
+       }))
+     );
+   }
  }
  
  // ---------------------- Sequential ----------------------
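The stride-aware `conv2DSingle` above follows the standard output-size rule `floor((in + 2·pad − kernel) / stride) + 1` (padding is applied before the call, so the function itself only sees the padded size). A hypothetical helper, not part of the package, makes the arithmetic explicit:

```javascript
// Hypothetical helper mirroring conv2DSingle's loop bounds (not in the package).
function convOutSize(inSize, kernel, stride = 1, pad = 0) {
  return Math.floor((inSize + 2 * pad - kernel) / stride) + 1;
}

console.log(convOutSize(8, 3, 1, 1));  // 8  (shape-preserving, "same"-style conv)
console.log(convOutSize(28, 5, 2));    // 12
```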
@@ -247,7 +508,39 @@ export class Sequential {
  }
  
  // ---------------------- Activations ----------------------
- export class ReLU{ constructor(){ this.out=null; } forward(x){ this.out=x.map(r=>r.map(v=>Math.max(0,v))); return this.out; } backward(grad){ return grad.map((r,i)=>r.map((v,j)=>v*(this.out[i][j]>0?1:0))); } }
+ export class ReLU{
+   constructor(){ this.out=null; }
+ 
+   forward(x){
+     // Handle both [batch, features] and [batch, channels, height, width]
+     if (Array.isArray(x[0]) && Array.isArray(x[0][0]) && Array.isArray(x[0][0][0])) {
+       // [batch, channels, height, width]
+       this.out = x.map(batch =>
+         batch.flatMap(channel =>
+           channel.flatMap(row =>
+             row.map(v => Math.max(0, v))
+           )
+         )
+       );
+     } else {
+       // [batch, features] - existing behavior
+       this.out = x.map(r => r.map(v => Math.max(0, v)));
+     }
+     return this.out;
+   }
+ 
+   backward(grad){
+     // Gradient shape must match forward output shape
+     if (Array.isArray(grad[0]) && Array.isArray(grad[0][0])) {
+       // Standard [batch, features]
+       return grad.map((r, i) => r.map((v, j) => v * (this.out[i][j] > 0 ? 1 : 0)));
+     } else {
+       // return as is
+       return grad;
+     }
+   }
+ }
+ 
  export class Sigmoid{ constructor(){ this.out=null; } forward(x){ const fn=v=>1/(1+Math.exp(-v)); this.out=x.map(r=>r.map(fn)); return this.out; } backward(grad){ return grad.map((r,i)=>r.map((v,j)=>v*this.out[i][j]*(1-this.out[i][j]))); } }
  export class Tanh{ constructor(){ this.out=null; } forward(x){ this.out=x.map(r=>r.map(v=>Math.tanh(v))); return this.out; } backward(grad){ return grad.map((r,i)=>r.map((v,j)=>v*(1-this.out[i][j]**2))); } }
  export class LeakyReLU{ constructor(alpha=0.01){ this.alpha=alpha; this.out=null; } forward(x){ this.out=x.map(r=>r.map(v=>v>0?v:v*this.alpha)); return this.out; } backward(grad){ return grad.map((r,i)=>r.map((v,j)=>v*(this.out[i][j]>0?1:this.alpha))); } }
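Worth noting about the new `ReLU.forward`: in the 4D branch the `flatMap` chain applies the activation and flattens each sample, so it returns `[batch, features]` rather than preserving `[batch, channels, height, width]`. A standalone repro of that exact logic:

```javascript
// Repro of the 4D branch above: activation plus an implicit per-sample flatten.
const x = [ [ [[-1, 2], [3, -4]] ] ];  // [batch=1][channels=1][2][2]
const out = x.map(sample =>
  sample.flatMap(channel =>
    channel.flatMap(row => row.map(v => Math.max(0, v)))
  )
);
console.log(out);  // [[0, 2, 3, 0]] -> [batch, features]
```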
@@ -635,7 +928,7 @@ export function saveModel(model){
    if(!(model instanceof Sequential)) throw new Error("saveModel supports only Sequential");
    const weights=model.layers.map(layer=>({weights:layer.W||null,biases:layer.b||null}));
    return JSON.stringify(weights);
- /* Didn't expect this to work */
+ /* Didn't expect this to work /: */
  }
  
  export function loadModel(model,json){
@@ -645,7 +938,7 @@ export function loadModel(model,json){
    if(layer.W && weights[i].weights) layer.W=weights[i].weights;
    if(layer.b && weights[i].biases) layer.b=weights[i].biases;
  });
- /* Didn't expect this to work */
+ /* Didn't expect this to work /: */
  }
  
  // ---------------------- Advanced Utils ----------------------
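As the hunks above show, `saveModel` serializes only each layer's `W` and `b`, so `loadModel` needs a freshly built model with the same architecture on the receiving side. A minimal round-trip sketch (import path assumed):

```javascript
import { saveModel, loadModel, Sequential, Linear, ReLU } from './src/jstorch.js';

const model = new Sequential([new Linear(2, 4), new ReLU(), new Linear(4, 1)]);
const json = saveModel(model);                 // JSON string of {weights, biases} per layer

const restored = new Sequential([new Linear(2, 4), new ReLU(), new Linear(4, 1)]);
loadModel(restored, json);                     // copies W and b back in place
```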
@@ -1,38 +1,36 @@
- // Example: Build and run a simple neural network model using mini-jstorch
+ import { Sequential, Linear, ReLU, MSELoss, Adam, StepLR, Tanh } from '../src/jstorch.js';
  
- import { Sequential, Linear, ReLU, MSELoss, SGD, Tensor } from "../src/MainEngine.js";
- 
- // Create dummy input and target data
- const input = new Tensor([[0.5, -1.0], [1.5, 2.0]]); // shape: [2,2]
- const target = new Tensor([[1.0, 0.0], [0.0, 1.0]]); // shape: [2,2]
- 
- // Build a simple model: Linear -> ReLU -> Linear
  const model = new Sequential([
- new Linear(2, 4),
+   new Linear(2, 16),
+   new Tanh(),
+   new Linear(16, 8),
    new ReLU(),
- new Linear(4, 2)
+   new Linear(8, 1)
  ]);
  
- const criterion = new MSELoss();
- const optimizer = new SGD(model.parameters(), 0.01);
- 
- // Forward pass
- const output = model.forward(input.data);
- console.log("Model output:", output);
+ const X = [[0,0], [0,1], [1,0], [1,1]];
+ const y = [[0], [1], [1], [0]];
  
- // Compute loss
- const loss = criterion.forward(output, target.data);
- console.log("Loss:", loss);
- 
- // Backward pass
- const grad = criterion.backward();
- model.backward(grad);
- 
- // Optimizer step
- optimizer.step();
- console.log("Parameters updated!");
- 
- // Run again to show change
- const output2 = model.forward(input.data);
- const loss2 = criterion.forward(output2, target.data);
- console.log("New Loss:", loss2);
+ const criterion = new MSELoss();
+ const optimizer = new Adam(model.parameters(), 0.1);
+ const scheduler = new StepLR(optimizer, 25, 0.5); // LR * 0.5 every 25 epochs
+ 
+ console.log("Training Progress:");
+ for (let epoch = 0; epoch < 1000; epoch++) {
+   const pred = model.forward(X);
+   const loss = criterion.forward(pred, y);
+   const grad = criterion.backward();
+   model.backward(grad);
+   optimizer.step();
+   scheduler.step();
+ 
+   if (epoch % 100 === 0) {
+     console.log(`Epoch ${epoch}: Loss = ${loss.toFixed(6)}, LR = ${optimizer.lr.toFixed(6)}`);
+   }
+ }
+ 
+ console.log("\nFinal Predictions:");
+ const predictions = model.forward(X);
+ predictions.forEach((pred, i) => {
+   console.log(`Input: ${X[i]} -> ${pred[0].toFixed(4)} (target: ${y[i][0]})`);
+ });
@@ -0,0 +1,72 @@
+ 
+ import {
+   fu_tensor, fu_add, fu_mul, fu_matmul, fu_sum, fu_mean,
+   fu_relu, fu_sigmoid, fu_tanh, fu_softmax, fu_flatten, fu_reshape
+ } from '../src/jstorch.js';
+ 
+ function testAllFuFunctions() {
+   console.log("🧪 TESTING ALL FU_FUNCTIONS\n");
+ 
+   // Test 1: fu_tensor
+   console.log("1. fu_tensor");
+   const t1 = fu_tensor([[1, 2], [3, 4]]);
+   console.log("✅", t1.data);
+ 
+   // Test 2: fu_add
+   console.log("\n2. fu_add");
+   const a = fu_tensor([[1, 2]]);
+   const b = fu_tensor([[3, 4]]);
+   const c = fu_add(a, b);
+   console.log("✅", a.data, "+", b.data, "=", c.data);
+ 
+   // Test 3: fu_mul
+   console.log("\n3. fu_mul");
+   const d = fu_mul(a, b);
+   console.log("✅", a.data, "*", b.data, "=", d.data);
+ 
+   // Test 4: fu_matmul
+   console.log("\n4. fu_matmul");
+   const e = fu_tensor([[1, 2]]);
+   const f = fu_tensor([[3], [4]]);
+   const g = fu_matmul(e, f);
+   console.log("✅ matmul =", g.data);
+ 
+   // Test 5: fu_sum & fu_mean
+   console.log("\n5. fu_sum & fu_mean");
+   const h = fu_tensor([[1, 2], [3, 4]]);
+   const sum = fu_sum(h);
+   const mean = fu_mean(h);
+   console.log("✅ sum =", sum.data, "mean =", mean.data);
+ 
+   // Test 6: fu_relu
+   console.log("\n6. fu_relu");
+   const i = fu_tensor([[-1, 0], [1, 2]]);
+   const relu = fu_relu(i);
+   console.log("✅ relu =", relu.data);
+ 
+   // Test 7: fu_sigmoid
+   console.log("\n7. fu_sigmoid");
+   const sigmoid = fu_sigmoid(i);
+   console.log("✅ sigmoid =", sigmoid.data);
+ 
+   // Test 8: fu_tanh
+   console.log("\n8. fu_tanh");
+   const tanh = fu_tanh(i);
+   console.log("✅ tanh =", tanh.data);
+ 
+   // Test 9: fu_softmax
+   console.log("\n9. fu_softmax");
+   const j = fu_tensor([[1, 2, 3]]);
+   const softmax = fu_softmax(j);
+   console.log("✅ softmax =", softmax.data);
+ 
+   // Test 10: fu_flatten & fu_reshape
+   console.log("\n10. fu_flatten & fu_reshape");
+   const k = fu_tensor([[1, 2], [3, 4]]);
+   const flat = fu_flatten(k);
+   const reshaped = fu_reshape(flat, 1, 4);
+   console.log("✅ flatten =", flat.data);
+   console.log("✅ reshape =", reshaped.data);
+ }
+ 
+ testAllFuFunctions();
@@ -1,6 +1,6 @@
  // Example: Test learning rate schedulers (StepLR and LambdaLR) with mini-jstorch optimizers
  
- import { SGD, StepLR, LambdaLR, Tensor } from "../src/MainEngine.js";
+ import { SGD, StepLR, LambdaLR, Tensor } from "../src/jstorch.js";
  
  const param = { param: [[1, 2], [3, 4]], grad: [[0, 0], [0, 0]] };
  const optimizer = new SGD([param], 0.1);
package/src/startup.cpu DELETED
@@ -1,15 +0,0 @@
- // you can delete this files this files are not important for the engine runtime.
- 
- e=run=[cpu[runtime]]
- devices=e.getdata[devices[5]]
- env.set.runtime('beta')
- env.rnt()
- env.set()
- env.register('vanilla',expe='Experiments.js',main='MainEngine.js',tgver=latest)
- resources=e.find(tag='resources')
- resources.ld(env)
- l=env.prog('asm')
- r=l.gv=[0xCAFEBABE]
- eng=env.load(register,r,'vanilla')
- eng.boot(env,r,'dp')
- eng.load(resources,runtime,devices)