mini-jstorch 1.3.2 → 1.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,39 +1,42 @@
  # Mini-JSTorch
 
- A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices Inspired by PyTorch.
+ A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.
 
  ## Overview
 
- Mini-JSTorch is a lightweight, high-performance JavaScript library for building neural networks that runs efficiently in both frontend and backend environments, including low-end devices. The library enables experimentation and learning in AI without compromising stability, accuracy, or training reliability.
+ Mini-JSTorch is a high-performance, minimalist JavaScript library for building neural networks. It runs efficiently in both frontend and backend environments, including low-end devices. The library enables quick experimentation and learning in AI without compromising stability, accuracy, or training reliability.
 
- This release, **version 1.2.3**, is a just some fix a typo and delete not used files.
+ This release, **version 1.4.3**, makes the `Matrix Utils` available to other files: they are now exported from the engine.
 
  ---
 
- ## Features Overview
-
- - Full **Conv2D support** with forward and backward operations.
- - **Tensor operations** now support broadcasting and reshaping.
- - Added new activations: `LeakyReLU`, `GELU`.
- - Optimizers: `Adam` and `SGD` fully integrated with gradient updates.
- - Dropout layer added for regularization.
- - Advanced utilities for tensor manipulation: `flatten`, `stack`, `concat`, `eye`, `reshape`.
- - End-to-end training and prediction workflow now fully tested.
- - Save and load model functionality included for seamless persistence.
- - Optimized performance for both frontend and backend usage.
- - Maintained backward compatibility for previous core layers and activations.
- - folder tests files template that you can use it before make your models.
+ ## Feature Highlights
+
+ - **Learning Rate Schedulers:** New `StepLR` and `LambdaLR` for dynamic optimizer learning rate adjustment.
+ - **Full Conv2D support:** Forward and backward operations for convolutional layers.
+ - **Tensor operations:** Broadcasting, reshaping, and reduction utilities.
+ - **Advanced Activations:** Includes `LeakyReLU`, `GELU`, `Mish`, `SiLU`, `ELU`, and more.
+ - **Optimizers:** `Adam` and `SGD` with gradient updates.
+ - **Dropout Layer:** For regularization during training.
+ - **BatchNorm2D:** For stable training in convolutional models.
+ - **Tensor Manipulation:** Utilities like `flatten`, `stack`, `concat`, `eye`, `reshape`.
+ - **Model Save & Load:** Easy persistence and restore of models.
+ - **Test/Demo Templates:** The `tests/` folder provides ready-to-run examples for model building and feature usage.
+ - **Performance Optimized:** Suitable for both frontend and backend usage.
+ - **Backward Compatibility:** Maintained for core layers and activations.
 
  ---
 
- ## Features
+ ## Core Features
 
- - **Core Layers**: Linear, Conv2D
- - **Activations**: ReLU, Sigmoid, Tanh, LeakyReLU, GELU
- - **Loss Functions**: MSELoss, CrossEntropyLoss
- - **Optimizers**: Adam, SGD
- - **Utilities**: zeros, randomMatrix, softmax, crossEntropy, dot, addMatrices, reshape, stack, flatten, eye, concat
- - **Model Container**: Sequential (for stacking layers with forward/backward passes)
+ - **Layers:** Linear, Conv2D
+ - **Activations:** ReLU, Sigmoid, Tanh, LeakyReLU, GELU, Mish, SiLU, ELU
+ - **Loss Functions:** MSELoss, CrossEntropyLoss
+ - **Optimizers:** Adam, SGD
+ - **Schedulers:** StepLR, LambdaLR, ReduceLROnPlateau
+ - **Regularization:** Dropout, BatchNorm2D
+ - **Utilities:** zeros, randomMatrix, softmax, crossEntropy, dot, addMatrices, reshape, stack, flatten, eye, concat
+ - **Model Container:** Sequential (for stacking layers with forward/backward passes)
 
  ---
 
@@ -49,7 +52,7 @@ npm install mini-jstorch
  ## Quick Start Example
 
  ```javascript
- import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam } from './src/MainEngine.js';
+ import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam, StepLR } from 'mini-jstorch';
 
  // Build model
  const model = new Sequential([
@@ -70,6 +73,7 @@ const Y = [
  // Loss & optimizer
  const lossFn = new CrossEntropyLoss();
  const optimizer = new Adam(model.parameters(), 0.1);
+ const scheduler = new StepLR(optimizer, 20, 0.5); // Halve LR every 20 epochs
 
  // Training loop
  for (let epoch = 1; epoch <= 100; epoch++) {
@@ -78,7 +82,8 @@ for (let epoch = 1; epoch <= 100; epoch++) {
  const gradLoss = lossFn.backward();
  model.backward(gradLoss);
  optimizer.step();
- if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}`);
+ scheduler.step();
+ if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}, LR: ${optimizer.lr.toFixed(4)}`);
  }
 
  // Prediction
@@ -94,7 +99,7 @@ predTest.forEach((p,i) => {
  ## Save & Load Models
 
  ```javascript
- import { saveModel, loadModel } from 'mini-jstorch';
+ import { saveModel, loadModel, Sequential } from 'mini-jstorch';
 
  const json = saveModel(model);
  const model2 = new Sequential([...]); // same architecture
@@ -103,7 +108,22 @@ loadModel(model2, json);
  ---
 
 
+ ## Demos & Testing
+
+ Check the `tests/` directory for ready-to-run demos:
+ - **tests/MakeModel.js:** Build and run a simple neural network.
+ - **tests/scheduler.js:** Experiment with learning rate schedulers.
+ - Add your own scripts for quick prototyping!
+
+ ```bash
+ node tests/MakeModel.js
+ node tests/scheduler.js
+ ```
+
+ ---
+
  ## Intended Use Cases
+
  - Rapid prototyping of neural networks in frontend and backend.
  - Learning and teaching foundational neural network concepts.
  - Experimentation on low-end devices or mobile browsers.
@@ -113,14 +133,24 @@ loadModel(model2, json);
 
  # License
 
- **MIT © 2025 Rizal**
+ `MIT License`
 
- ---
+ **Copyright (c) 2025 rizal-editors**
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
 
- ## Facts
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
 
- - **This module is implemented entirely in pure JavaScript.**
- - **The `Dummy` folder contains modules used for development, testing, and debugging before integration into the main engine.**
- - **files startup.cpu is actually an some random files lol.**
- - **This module was created by a `single` developer.**
- - **You can join to the `mjs-group` Organization on my profile!**
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
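
The README's Quick Start above only exercises `StepLR`. Below is a minimal sketch of the other scheduler added in this release, `LambdaLR`, written against the constructor and `step()`/`get_lr()` methods visible in the `MainEngine.js` hunk further down; the layer sizes and the 0.95 decay factor are illustrative, not taken from the package.

```javascript
// Sketch: exponential learning-rate decay via LambdaLR.
// step() recomputes the rate from the base: lr = base_lr * lambda(epoch).
import { Sequential, Linear, ReLU, Adam, LambdaLR } from 'mini-jstorch';

const model = new Sequential([new Linear(2, 4), new ReLU(), new Linear(4, 2)]);
const optimizer = new Adam(model.parameters(), 0.1);
const scheduler = new LambdaLR(optimizer, epoch => Math.pow(0.95, epoch));

for (let epoch = 1; epoch <= 5; epoch++) {
  // ...forward/backward passes as in the Quick Start...
  optimizer.step();
  scheduler.step(); // lr becomes 0.1 * 0.95^epoch
  console.log(`epoch ${epoch}: lr = ${scheduler.get_lr().toFixed(5)}`);
}
```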
package/index.js CHANGED
@@ -1,90 +1,2 @@
- // =====================================================
- // FILE: index.js - Main entry point
- // =====================================================
-
- // Import all modules
- import { Tensor } from './src/tensor.js';
- import {
-   Linear, Dense, Conv2d, MaxPool2d, AvgPool2d, Flatten, Dropout,
-   BatchNorm2d, ReLU, Sigmoid, Tanh, LeakyReLU, ELU, Softmax, Sequential
- } from './src/layers.js';
- import { MSELoss, CrossEntropyLoss, BCELoss } from './src/loss.js';
- import { SGD, Adam, RMSprop } from './src/optimizers.js';
- import { Model, Trainer, models, optimizers, losses, layers, tensors, utils } from './src/model.js';
-
- // Export everything
- export {
-   // Core classes
-   Tensor,
-   Model,
-   Trainer,
-
-   // Layer classes
-   Linear,
-   Dense,
-   Conv2d,
-   MaxPool2d,
-   AvgPool2d,
-   Flatten,
-   Dropout,
-   BatchNorm2d,
-   ReLU,
-   Sigmoid,
-   Tanh,
-   LeakyReLU,
-   ELU,
-   Softmax,
-   Sequential,
-
-   // Loss classes
-   MSELoss,
-   CrossEntropyLoss,
-   BCELoss,
-
-   // Optimizer classes
-   SGD,
-   Adam,
-   RMSprop,
-
-   // Factory functions
-   models,
-   optimizers,
-   losses,
-   layers,
-   tensors,
-   utils
- };
-
- // Default export
- export default {
-   Tensor,
-   Model,
-   Trainer,
-   Linear,
-   Dense,
-   Conv2d,
-   MaxPool2d,
-   AvgPool2d,
-   Flatten,
-   Dropout,
-   BatchNorm2d,
-   ReLU,
-   Sigmoid,
-   Tanh,
-   LeakyReLU,
-   ELU,
-   Softmax,
-   Sequential,
-   MSELoss,
-   CrossEntropyLoss,
-   BCELoss,
-   SGD,
-   Adam,
-   RMSprop,
-   models,
-   optimizers,
-   losses,
-   layers,
-   tensors,
-   utils
- };
+ // package root
+ export * from "./src/MainEngine.js";
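
Since `index.js` now just re-exports `MainEngine.js`, the matrix utilities this release makes public are importable straight from the package root. A small sketch (the input values are illustrative):

```javascript
// Sketch: the newly exported matrix utils, imported from the package root.
import { zeros, eye, dot, transpose, softmax } from 'mini-jstorch';

const b = [[2, 3], [4, 5]];
console.log(dot(eye(2), b));     // [[2, 3], [4, 5]] — identity leaves b unchanged
console.log(transpose(b));       // [[2, 4], [3, 5]]
console.log(zeros(2, 3));        // 2x3 matrix of zeros
console.log(softmax([1, 2, 3])); // three probabilities summing to 1
```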
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "mini-jstorch",
-   "version": "1.3.2",
+   "version": "1.4.3",
    "type": "module",
    "description": "A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices Inspired by PyTorch.",
    "main": "index.js",
package/src/MainEngine.js CHANGED
@@ -1,18 +1,77 @@
+ /*!
+  * Project: mini-jstorch
+  * File: MainEngine.js
+  * Author: M. Rizal H. (Actual Author Name)
+  * License: MIT
+  * Copyright (C) 2025 M. Rizal H.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a copy
+  * of this software and associated documentation files (the "Software"), to deal
+  * in the Software without restriction, including without limitation the rights
+  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+  * copies of the Software, and to permit persons to whom the Software is
+  * furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice shall be included in all
+  * copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
+  */
 
- // MINI JSTORCH ENGINE
- // LICENSE: MIT (C) Rizal 2025
- // 1.2.3 ENGINE VERSIONS
- // IMPORTANT: CORE ENGINE DO NOT EDIT THIS FILES VERY SENSITIVE IT WILL CRASHING YOUR ENGINE SYSTEMS!
 
  // ---------------------- Utilities ----------------------
- function zeros(rows, cols) { return Array.from({length:rows},()=>Array(cols).fill(0)); }
- function ones(rows, cols) { return Array.from({length:rows},()=>Array(cols).fill(1)); }
- function randomMatrix(rows, cols, scale=0.1){ return Array.from({length:rows},()=>Array.from({length:cols},()=> (Math.random()*2-1)*scale)); }
- function transpose(matrix){ return matrix[0].map((_,i)=>matrix.map(row=>row[i])); }
- function addMatrices(a,b){ return a.map((row,i)=>row.map((v,j)=>v+(b[i] && b[i][j]!==undefined?b[i][j]:0))); }
- function dot(a,b){ const res=zeros(a.length,b[0].length); for(let i=0;i<a.length;i++) for(let j=0;j<b[0].length;j++) for(let k=0;k<a[0].length;k++) res[i][j]+=a[i][k]*b[k][j]; return res; }
- function softmax(x){ const m=Math.max(...x); const exps=x.map(v=>Math.exp(v-m)); const s=exps.reduce((a,b)=>a+b,0); return exps.map(v=>v/s); }
- function crossEntropy(pred,target){ const eps=1e-12; return -target.reduce((sum,t,i)=>sum+t*Math.log(pred[i]+eps),0); }
+ export function zeros(rows, cols) {
+   return Array.from({length:rows},()=>Array(cols).fill(0));
+ }
+
+ export function ones(rows, cols) {
+   return Array.from({length:rows},()=>Array(cols).fill(1));
+ }
+
+ export function randomMatrix(rows, cols, scale=0.1){
+   return Array.from({length:rows},()=>
+     Array.from({length:cols},()=> (Math.random()*2-1)*scale)
+   );
+ }
+
+ export function transpose(matrix){
+   return matrix[0].map((_,i)=>matrix.map(row=>row[i]));
+ }
+
+ export function addMatrices(a,b){
+   return a.map((row,i)=>
+     row.map((v,j)=>v+(b[i] && b[i][j]!==undefined?b[i][j]:0))
+   );
+ }
+
+ export function dot(a,b){
+   const res=zeros(a.length,b[0].length);
+   for(let i=0;i<a.length;i++)
+     for(let j=0;j<b[0].length;j++)
+       for(let k=0;k<a[0].length;k++)
+         res[i][j]+=a[i][k]*b[k][j];
+   return res;
+ }
+
+ export function softmax(x){
+   const m=Math.max(...x);
+   const exps=x.map(v=>Math.exp(v-m));
+   const s=exps.reduce((a,b)=>a+b,0);
+   return exps.map(v=>v/s);
+ }
+
+ export function crossEntropy(pred,target){
+   const eps=1e-12;
+   return -target.reduce((sum,t,i)=>sum+t*Math.log(pred[i]+eps),0);
+ }
+ /* No new utils in this patch; the existing ones are now
+    exported so that other files can use them. */
 
  // ---------------------- Tensor ----------------------
  export class Tensor {
@@ -225,6 +284,48 @@ export class Adam{
  }
  }
 
+ // ---------------------- Learning Rate Schedulers ----------------------
+ export class StepLR {
+   constructor(optimizer, step_size, gamma=1.0) {
+     this.optimizer = optimizer;
+     this.step_size = step_size;
+     this.gamma = gamma;
+     this.last_epoch = 0;
+     this.base_lr = optimizer.lr;
+   }
+
+   step() {
+     this.last_epoch += 1;
+     if (this.last_epoch % this.step_size === 0) {
+       this.optimizer.lr *= this.gamma;
+     }
+   }
+
+   get_lr() {
+     return this.optimizer.lr;
+     /* read-only accessor; no side effects */
+   }
+ }
+
+ export class LambdaLR {
+   constructor(optimizer, lr_lambda) {
+     this.optimizer = optimizer;
+     this.lr_lambda = lr_lambda;
+     this.last_epoch = 0;
+     this.base_lr = optimizer.lr;
+   }
+
+   step() {
+     this.last_epoch += 1;
+     this.optimizer.lr = this.base_lr * this.lr_lambda(this.last_epoch);
+   }
+
+   get_lr() {
+     return this.optimizer.lr;
+     /* read-only accessor; no side effects */
+   }
+ }
+
  // ---------------------- ELU Activation ----------------------
  export class ELU {
  constructor(alpha=1.0) {
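
`StepLR` above only reads and writes `optimizer.lr`, so its schedule can be traced with a plain object standing in for the optimizer; after `n` calls to `step()`, `lr = base_lr * gamma^floor(n / step_size)`. A minimal sketch (the stand-in object and the numbers are illustrative):

```javascript
// Sketch: tracing the StepLR schedule defined in the hunk above.
import { StepLR } from 'mini-jstorch';

const fakeOptimizer = { lr: 0.1 };                   // the only field StepLR touches
const scheduler = new StepLR(fakeOptimizer, 2, 0.5); // halve the lr every 2 steps

for (let epoch = 1; epoch <= 6; epoch++) {
  scheduler.step();
  console.log(`epoch ${epoch}: lr = ${scheduler.get_lr()}`);
}
// Prints: 0.1, 0.05, 0.05, 0.025, 0.025, 0.0125
```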
@@ -534,6 +635,7 @@ export function saveModel(model){
  if(!(model instanceof Sequential)) throw new Error("saveModel supports only Sequential");
  const weights=model.layers.map(layer=>({weights:layer.W||null,biases:layer.b||null}));
  return JSON.stringify(weights);
+ /* serializes weights and biases only; the architecture must be rebuilt by the caller */
  }
 
  export function loadModel(model,json){
@@ -543,6 +645,7 @@ export function loadModel(model,json){
  if(layer.W && weights[i].weights) layer.W=weights[i].weights;
  if(layer.b && weights[i].biases) layer.b=weights[i].biases;
  });
+ /* copies saved weights into a model built with the same architecture */
  }
 
  // ---------------------- Advanced Utils ----------------------
@@ -551,10 +654,10 @@ export function stack(tensors){ return tensors.map(t=>t.data); }
  export function eye(n){ return Array.from({length:n},(_,i)=>Array.from({length:n},(_,j)=>i===j?1:0)); }
  export function concat(a,b,axis=0){ /* concat along axis */ if(axis===0) return [...a,...b]; if(axis===1) return a.map((row,i)=>[...row,...b[i]]); }
  export function reshape(tensor, rows, cols) {
-   let flat = tensor.data.flat(); // flatten dulu
+   let flat = tensor.data.flat(); // flatten first
    if(flat.length < rows*cols) throw new Error("reshape size mismatch");
    const out = Array.from({length: rows}, (_, i) =>
      flat.slice(i*cols, i*cols + cols)
    );
    return out;
- }
+ }
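
To close, a small sketch of the Advanced Utils touched in the last hunk. `reshape` reads `tensor.data`, so a plain object with a `data` matrix (standing in for a `Tensor`) keeps the example self-contained; the values are illustrative.

```javascript
// Sketch: reshape, concat, and eye from the Advanced Utils section.
import { reshape, concat, eye } from 'mini-jstorch';

const t = { data: [[1, 2, 3], [4, 5, 6]] };  // stand-in for a 2x3 Tensor
console.log(reshape(t, 3, 2));               // [[1, 2], [3, 4], [5, 6]]
console.log(concat([[1, 2]], [[3, 4]], 0));  // rows stacked: [[1, 2], [3, 4]]
console.log(concat([[1, 2]], [[3, 4]], 1));  // rows joined: [[1, 2, 3, 4]]
console.log(eye(2));                         // [[1, 0], [0, 1]]
```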