mini-jstorch 1.2.2 → 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/MODULE.md +41 -0
- package/README.md +117 -8
- package/hh.js +38 -0
- package/index.js +90 -6
- package/package.json +4 -1
- package/src/MainEngine.js +560 -1
- package/src/startup.cpu +11 -8
- package/tests/DebugModel.js +127 -0
- package/tests/MakeModel.js +570 -0
- package/tests/unit/newver.js +103 -0
- package/tests/unit/ogver.js +87 -0
package/MODULE.md
ADDED
@@ -0,0 +1,41 @@
+## MODULE STATS ##
+
+All notable changes to *Mini-JSTorch* will be documented in this file.
+
+# MSG
+
+Note: this file is maintained automatically; all changes to the system state are recorded here without manual editing.
+
+---
+
+**OFFICIAL RELEASE:** 2025-Monday-August-23, 2:22 AM (estimated release time)
+**VERSION:** 1.3.1
+**LICENSE:** MIT © 2025
+**AUTHOR:** Rizal
+**MODULE NAME:** mini-jstorch
+**MODULE DESC:** A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.
+**MODULE TYPE:** module
+**ENGINE VERSIONS:** 1.2.1
+**UPDATE TITLE:** `PATCH` update.
+**ADDED FILES/FOLDER:** {
+  "N/A" //N/A [N/A]
+}
+
+---
+
+**MODIFIED FILES:** {
+  "src" //folder
+  "src/startup.cpu" //file
+  "src/MainEngine.js" //file
+  "tests/tests.js" //file [npmignore detected]
+  "tests" //folder
+  "src/Dummy/exp.js" //file [npmignore detected]
+  "package.json" //file
+  "src/EngState.json" //file [npmignore detected]
+  "src/state.txt" //file [npmignore detected]
+  "README.md" //file
+  ".npmignore" //file [npmignore detected]
+  "N/A" //N/A [N/A]
+}
+
+---
package/README.md
CHANGED
@@ -1,17 +1,126 @@
-#
+# Mini-JSTorch
+
+A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.
+
+## Overview
+
+Mini-JSTorch is a lightweight, high-performance JavaScript library for building neural networks that runs efficiently in both frontend and backend environments, including low-end devices. The library enables experimentation and learning in AI without compromising stability, accuracy, or training reliability.
+
+This release, **version 1.2.3**, only fixes a typo and removes unused files.
+
+---
+
+## Features Overview
+
+- Full **Conv2D support** with forward and backward operations.
+- **Tensor operations** now support broadcasting and reshaping.
+- Added new activations: `LeakyReLU`, `GELU`.
+- Optimizers: `Adam` and `SGD` fully integrated with gradient updates.
+- Dropout layer added for regularization.
+- Advanced utilities for tensor manipulation: `flatten`, `stack`, `concat`, `eye`, `reshape`.
+- End-to-end training and prediction workflow now fully tested.
+- Save and load model functionality included for seamless persistence.
+- Optimized performance for both frontend and backend usage.
+- Maintained backward compatibility for previous core layers and activations.
+- The `tests` folder contains template files you can use before building your own models.
+
+---
+
+## Features
+
+- **Core Layers**: Linear, Conv2D
+- **Activations**: ReLU, Sigmoid, Tanh, LeakyReLU, GELU
+- **Loss Functions**: MSELoss, CrossEntropyLoss
+- **Optimizers**: Adam, SGD
+- **Utilities**: zeros, randomMatrix, softmax, crossEntropy, dot, addMatrices, reshape, stack, flatten, eye, concat
+- **Model Container**: Sequential (for stacking layers with forward/backward passes)
+
+---
+
+## Installation
+
+```bash
+npm install mini-jstorch
+# Node.js v20+ recommended for best performance
+```
 
 ---
 
+## Quick Start Example
 
+```javascript
+import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam } from './src/MainEngine.js';
+
+// Build model
+const model = new Sequential([
+  new Linear(2, 4),
+  new ReLU(),
+  new Linear(4, 2),
+  new Sigmoid()
+]);
+
+// Sample XOR dataset
+const X = [
+  [0,0], [0,1], [1,0], [1,1]
+];
+const Y = [
+  [1,0], [0,1], [0,1], [1,0]
+];
+
+// Loss & optimizer
+const lossFn = new CrossEntropyLoss();
+const optimizer = new Adam(model.parameters(), 0.1);
+
+// Training loop
+for (let epoch = 1; epoch <= 100; epoch++) {
+  const pred = model.forward(X);
+  const loss = lossFn.forward(pred, Y);
+  const gradLoss = lossFn.backward();
+  model.backward(gradLoss);
+  optimizer.step();
+  if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}`);
+}
+
+// Prediction
+const predTest = model.forward(X);
+predTest.forEach((p, i) => {
+  const predictedClass = p.indexOf(Math.max(...p));
+  console.log(`Input: ${X[i]}, Predicted class: ${predictedClass}, Raw output: ${p.map(v => v.toFixed(3))}`);
+});
+```
+
+---
+
+## Save & Load Models
+
+```javascript
+import { saveModel, loadModel } from 'mini-jstorch';
+
+const json = saveModel(model);
+const model2 = new Sequential([...]); // same architecture
+loadModel(model2, json);
+```
+
+---
+
+## Intended Use Cases
+
+- Rapid prototyping of neural networks in frontend and backend.
+- Learning and teaching foundational neural network concepts.
+- Experimentation on low-end devices or mobile browsers.
+- Lightweight AI projects without GPU dependency.
+
+---
+
+# License
+
+**MIT © 2025 Rizal**
+
+---
+
+## Facts
+
+- **This module is implemented entirely in pure JavaScript.**
+- **The `Dummy` folder contains modules used for development, testing, and debugging before integration into the main engine.**
+- **The `startup.cpu` file is actually just a random file.**
+- **This module was created by a `single` developer.**
+- **You can join the `mjs-group` organization on my profile!**
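The README above lists its tensor utilities only by name. A minimal sketch of how they might be called, assuming (in line with the library's other examples) that they operate on plain nested JavaScript arrays; these signatures are inferred for illustration, not confirmed by this diff:

```javascript
// Hypothetical usage of the utilities named in the README's Features list.
// All signatures here are assumptions inferred from the surrounding examples.
import { zeros, eye, flatten, reshape, softmax } from 'mini-jstorch';

const id = eye(3);                       // assumed: 3x3 identity as nested arrays
const z = zeros(2, 3);                   // assumed: 2x3 matrix of zeros
const flat = flatten([[1, 2], [3, 4]]);  // assumed: [1, 2, 3, 4]
const grid = reshape(flat, 2, 2);        // assumed: [[1, 2], [3, 4]]
const probs = softmax([1, 2, 3]);        // assumed: probabilities summing to 1

console.log(id, z, flat, grid, probs);
```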
package/hh.js
ADDED
@@ -0,0 +1,38 @@
+import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam } from './src/MainEngine.js';
+
+// Build model
+const model = new Sequential([
+  new Linear(2, 4),
+  new ReLU(),
+  new Linear(4, 2),
+  new Sigmoid()
+]);
+
+// Sample XOR dataset
+const X = [
+  [0,0], [0,1], [1,0], [1,1]
+];
+const Y = [
+  [1,0], [0,1], [0,1], [1,0]
+];
+
+// Loss & optimizer
+const lossFn = new CrossEntropyLoss();
+const optimizer = new Adam(model.parameters(), 0.1);
+
+// Training loop
+for (let epoch = 1; epoch <= 500; epoch++) {
+  const pred = model.forward(X);
+  const loss = lossFn.forward(pred, Y);
+  const gradLoss = lossFn.backward();
+  model.backward(gradLoss);
+  optimizer.step();
+  if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}`);
+}
+
+// Prediction
+const predTest = model.forward(X);
+predTest.forEach((p, i) => {
+  const predictedClass = p.indexOf(Math.max(...p));
+  console.log(`Input: ${X[i]}, Predicted class: ${predictedClass}, Raw output: ${p.map(v => v.toFixed(3))}`);
+});
package/index.js
CHANGED
@@ -1,6 +1,90 @@
-//
-
-
-
-
-
+// =====================================================
+// FILE: index.js - Main entry point
+// =====================================================
+
+// Import all modules
+import { Tensor } from './src/tensor.js';
+import {
+  Linear, Dense, Conv2d, MaxPool2d, AvgPool2d, Flatten, Dropout,
+  BatchNorm2d, ReLU, Sigmoid, Tanh, LeakyReLU, ELU, Softmax, Sequential
+} from './src/layers.js';
+import { MSELoss, CrossEntropyLoss, BCELoss } from './src/loss.js';
+import { SGD, Adam, RMSprop } from './src/optimizers.js';
+import { Model, Trainer, models, optimizers, losses, layers, tensors, utils } from './src/model.js';
+
+// Export everything
+export {
+  // Core classes
+  Tensor,
+  Model,
+  Trainer,
+
+  // Layer classes
+  Linear,
+  Dense,
+  Conv2d,
+  MaxPool2d,
+  AvgPool2d,
+  Flatten,
+  Dropout,
+  BatchNorm2d,
+  ReLU,
+  Sigmoid,
+  Tanh,
+  LeakyReLU,
+  ELU,
+  Softmax,
+  Sequential,
+
+  // Loss classes
+  MSELoss,
+  CrossEntropyLoss,
+  BCELoss,
+
+  // Optimizer classes
+  SGD,
+  Adam,
+  RMSprop,
+
+  // Factory functions
+  models,
+  optimizers,
+  losses,
+  layers,
+  tensors,
+  utils
+};
+
+// Default export
+export default {
+  Tensor,
+  Model,
+  Trainer,
+  Linear,
+  Dense,
+  Conv2d,
+  MaxPool2d,
+  AvgPool2d,
+  Flatten,
+  Dropout,
+  BatchNorm2d,
+  ReLU,
+  Sigmoid,
+  Tanh,
+  LeakyReLU,
+  ELU,
+  Softmax,
+  Sequential,
+  MSELoss,
+  CrossEntropyLoss,
+  BCELoss,
+  SGD,
+  Adam,
+  RMSprop,
+  models,
+  optimizers,
+  losses,
+  layers,
+  tensors,
+  utils
+};
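Because package.json points `"main"` at this file and declares `"type": "module"`, both the named exports and the default export above are reachable from the package root. A minimal consumer sketch; the class names follow the index.js diff above, and anything beyond that is an assumption:

```javascript
// Importing from the package entry point rewritten above.
// Named exports and the default-export object expose the same classes.
import torch, { Sequential, Linear, ReLU, Sigmoid } from 'mini-jstorch';

console.log(torch.Sequential === Sequential); // true, per the index.js diff

// Assemble a small model from the re-exported layer classes.
const model = new Sequential([
  new Linear(2, 4),
  new ReLU(),
  new Linear(4, 1),
  new Sigmoid()
]);
```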
package/package.json
CHANGED
@@ -1,13 +1,16 @@
 {
   "name": "mini-jstorch",
-  "version": "1.2.2",
+  "version": "1.3.2",
   "type": "module",
   "description": "A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices Inspired by PyTorch.",
   "main": "index.js",
   "keywords": [
     "neural-network",
     "javascript",
+    "lightweight-torch",
     "lightweight",
+    "small",
+    "javascript-torch",
     "ai",
     "jstorch",
     "pytorch",