mini-jstorch 1.0.0 → 1.1.0
- package/README.md +73 -0
- package/act/linear.js +8 -0
- package/act/relu.js +10 -0
- package/example.js +23 -0
- package/index.js +17 -0
- package/layers/dense.js +28 -0
- package/layers/dropout.js +15 -0
- package/layers/flatten.js +14 -0
- package/models/Seq.js +25 -0
- package/optim/sgd.js +12 -0
- package/package.json +18 -8
- package/train/loop.js +28 -0
- package/utils/io.js +28 -0
- package/utils/math.js +18 -0
- package/utils/tensor.js +13 -0
package/README.md
ADDED
@@ -0,0 +1,73 @@
# mini-jstorch README

## Overview
The *mini-jstorch* module is a lightweight, **PyTorch-inspired** neural network library in JavaScript. It provides the basic components to build, train, and optimize neural networks without being heavy.

## Features
- Sequential model container (`Seq`) for easy stacking of layers.
- Common layers: `Dense`, `Dropout`, `Flatten`.
- Activations: `relu`, `sigmoid`, `leakyRelu`, `tanh`, `linear`.
- Tensor utilities (`zeros`, `rand`, `shape`) and math helpers (`dot`, `add`).
- Custom SGD optimizer.
- Training loop with batch and epoch support.
- Save/load models with JSON.

## Directory Structure
```
module-root/            # Placeholder; not the real module root name
│
├── index.js            # Main exports
├── models/             # Model containers
│   └── Seq.js          # Sequential model
├── layers/             # Neural network layers
│   ├── dense.js        # Fully connected layer
│   ├── dropout.js      # Dropout layer
│   └── flatten.js      # Flatten input layer
├── act/                # Activation functions
│   ├── linear.js       # Additional activations (leakyRelu, tanh)
│   └── relu.js         # ReLU, Sigmoid
├── utils/              # Utilities
│   ├── io.js           # Save/load models
│   ├── math.js         # Math helpers (dot, add, etc.)
│   └── tensor.js       # Tensor helpers
├── train/              # Training loop
│   └── loop.js
└── optim/              # Optimizers
    └── sgd.js          # Custom SGD optimizer
```

## Patch Notes - Version 1.1.0
- Refactored activation system into `act/` folder for better separation.
- Added `linear.js` activations (leakyRelu, tanh).
- Updated `Dense` layer with `forwardBatch` and `forwardLeaky` support.
- Added `Flatten` and `Dropout` layers.
- Implemented batch prediction support in `Seq` model.
- Added basic SGD optimizer in `optim/`.
- Introduced training loop in `train/loop.js`.
- Expanded utils: tensor creation (`zeros`, `rand`), math operations (`dotBatch`, `addScalar`).
- Enhanced model IO with activation mapping and optional pretty-printing.
- Added comments throughout the files for clarity.

## Example Usage
```javascript
import { Seq } from './models/Seq.js';
import { Dense } from './layers/dense.js';
import * as act from './act/linear.js';
import { SGD } from './optim/sgd.js';
import { train } from './train/loop.js';

const model = new Seq();
model.add(new Dense(3, 5, act.leakyRelu));
model.add(new Dense(5, 1));

const optimizer = new SGD(0.01);

train(model, [[1,2,3]], [[1]], (pred, y) => pred[0] - y[0], optimizer, 10);
model.summary();
```

## Notes
- Layer and function names are kept short and descriptive for clarity.
- Batch methods are available in layers and the model for better training performance.
- Designed as an educational, extensible PyTorch-like system in pure JavaScript.
package/act/linear.js
ADDED
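The viewer lists this file without expanding its contents. Based on the README (which places `leakyRelu` and `tanh` here, plus `linear`) and on `layers/dense.js` applying activations element-wise via `z.map(...)`, a plausible minimal sketch follows; it is a reconstruction, not the published source:

```javascript
// act/linear.js: hypothetical reconstruction, not the published source.
// Element-wise activations; each takes and returns a single number.

export function linear(x) {
  return x; // identity
}

export function leakyRelu(x) {
  return x > 0 ? x : 0.01 * x; // small negative slope instead of zero
}

export function tanh(x) {
  return Math.tanh(x);
}
```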
package/act/relu.js
ADDED
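Also collapsed in the viewer; the README says this file holds ReLU and Sigmoid. A minimal sketch under that assumption:

```javascript
// act/relu.js: hypothetical reconstruction, not the published source.
export function relu(x) {
  return Math.max(0, x);
}

export function sigmoid(x) {
  return 1 / (1 + Math.exp(-x));
}
```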
package/example.js
ADDED
@@ -0,0 +1,23 @@
// example.js
import { Seq, Dense, act, SGD, train } from './index.js';

// Create the model
const model = new Seq();
model.add(new Dense(3, 5, act.leakyRelu));
model.add(new Dense(5, 1));

// Create the optimizer
const optimizer = new SGD(0.01);

// Example training data
const x = [[1, 2, 3]];
const y = [[1]];

// Simple loss function
const lossFn = (pred, target) => pred[0] - target[0];

// Train the model
train(model, x, y, lossFn, optimizer, 10);

// Print the summary
model.summary();
package/index.js
ADDED
@@ -0,0 +1,17 @@
// index.js
// Main entry point for mini-jstorch

// Core model (the file is models/Seq.js; the import path is case-sensitive)
export { Seq } from './models/Seq.js';

// Layers
export { Dense } from './layers/dense.js';

// Activations
export * as act from './act/linear.js';

// Optimizers
export { SGD } from './optim/sgd.js';

// Training loop
export { train } from './train/loop.js';
package/layers/dense.js
ADDED
@@ -0,0 +1,28 @@
import { dot, add } from '../utils/math.js';
import { leakyRelu } from '../act/linear.js';

// Fully connected layer
export class Dense {
  constructor(inputSize, outputSize, activation = x => x) {
    this.weights = Array.from({ length: outputSize }, () =>
      Array.from({ length: inputSize }, () => Math.random() * 0.1)
    );
    this.bias = Array(outputSize).fill(0);
    this.activation = activation;
    this.name = "Dense";
  }

  forward(input) {
    const z = add(dot(this.weights, input), this.bias);
    return z.map(this.activation);
  }

  forwardBatch(inputs) {
    return inputs.map(input => this.forward(input));
  }

  forwardLeaky(input) {
    const z = add(dot(this.weights, input), this.bias);
    return z.map(v => leakyRelu(v)); // apply element-wise, matching forward()
  }
}
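For reference, `forward` is a matrix-vector product plus bias followed by the activation. A quick worked example with fixed weights (values chosen purely for illustration):

```javascript
import { Dense } from './layers/dense.js';

const layer = new Dense(2, 1);   // 2 inputs -> 1 output, identity activation
layer.weights = [[1, 2]];        // one output row
layer.bias = [0.5];

console.log(layer.forward([3, 4]));                // [1*3 + 2*4 + 0.5] => [11.5]
console.log(layer.forwardBatch([[3, 4], [0, 0]])); // [[11.5], [0.5]]
```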
package/layers/dropout.js
ADDED

@@ -0,0 +1,15 @@
// Dropout layer for regularization
export class Dropout {
  constructor(p = 0.5) {
    this.p = p;
    this.name = "Dropout";
  }

  forward(input) {
    return input.map(v => (Math.random() < this.p ? 0 : v / (1 - this.p)));
  }

  forwardBatch(inputs) {
    return inputs.map(inp => this.forward(inp));
  }
}

package/layers/flatten.js
ADDED
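The diff omits the body of `flatten.js`; given the README's one-line description ("Flatten input layer") and the +14 line count, a plausible minimal sketch, again a reconstruction rather than the published source:

```javascript
// layers/flatten.js: hypothetical reconstruction, not the published source.
// Flattens a nested input (e.g. [[1, 2], [3, 4]]) into a single vector.
export class Flatten {
  constructor() {
    this.name = "Flatten";
  }

  forward(input) {
    return input.flat(Infinity);
  }

  forwardBatch(inputs) {
    return inputs.map(inp => this.forward(inp));
  }
}
```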
package/models/Seq.js
ADDED
@@ -0,0 +1,25 @@
// Sequential model container
export class Seq {
  constructor() {
    this.layers = [];
  }

  add(layer) {
    this.layers.push(layer);
  }

  predict(input) {
    return this.layers.reduce((out, layer) => layer.forward(out), input);
  }

  predictBatch(inputs) {
    return inputs.map(input => this.predict(input));
  }

  summary() {
    console.log("Model summary:");
    this.layers.forEach((l, i) => {
      console.log(`  Layer ${i+1}: ${l.name}, outputSize: ${l.weights ? l.weights.length : 'N/A'}`);
    });
  }
}
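`predict` threads the input through the layers with `reduce`; a brief usage sketch (output values depend on the random initial weights):

```javascript
import { Seq } from './models/Seq.js';
import { Dense } from './layers/dense.js';

const model = new Seq();
model.add(new Dense(2, 2));
model.add(new Dense(2, 1));

console.log(model.predict([1, 2]));                // single sample -> [y]
console.log(model.predictBatch([[1, 2], [3, 4]])); // list of samples -> [[y1], [y2]]
model.summary();                                   // prints each layer's name and output size
```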
package/optim/sgd.js
ADDED
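This file is collapsed as well. Its call sites, `new SGD(0.01)` and `optimizer.step([model.layers[0].weights], grads)` in `train/loop.js`, pin down the interface; a minimal sketch consistent with them:

```javascript
// optim/sgd.js: hypothetical reconstruction, not the published source.
// Plain stochastic gradient descent: w -= lr * grad, updating weights in place.
export class SGD {
  constructor(lr = 0.01) {
    this.lr = lr;
  }

  step(params, grads) {
    params.forEach(weights => {
      weights.forEach((row, i) => {
        row.forEach((w, j) => {
          const g = grads[i]?.[j] ?? 0; // grads may cover fewer entries than weights
          row[j] = w - this.lr * g;
        });
      });
    });
  }
}
```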
package/package.json
CHANGED
@@ -1,12 +1,22 @@
 {
   "name": "mini-jstorch",
-  "version": "1.0.0",
+  "version": "1.1.0",
+  "type": "module",
+  "description": "A lightweight JavaScript neural network framework for browser & Node.js, inspired by PyTorch.",
   "main": "index.js",
-  "
-  "
-
-
-
-
-
+  "keywords": [
+    "neural-network",
+    "javascript",
+    "lightweight",
+    "ai",
+    "machine-learning",
+    "browser",
+    "mini"
+  ],
+  "author": "Rizal",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/rizal-editors/mini-jstorch.git"
+  }
 }
package/train/loop.js
ADDED
@@ -0,0 +1,28 @@
// Training loop
export function train(model, X, Y, lossFn, optimizer, epochs = 1) {
  for (let e = 0; e < epochs; e++) {
    let totalLoss = 0;

    for (let i = 0; i < X.length; i++) {
      const x = X[i];
      const y = Y[i];

      // Forward pass
      const pred = model.predict(x);

      // Compute loss
      const loss = lossFn(pred, y);
      totalLoss += loss;

      // Naive gradient (placeholder)
      const grads = pred.map((p, j) => [p - y[j]]);

      // Update first Dense layer as example
      if (model.layers[0].weights) {
        optimizer.step([model.layers[0].weights], grads);
      }
    }

    console.log(`Epoch ${e+1}/${epochs}, Loss: ${totalLoss/X.length}`);
  }
}
package/utils/io.js
ADDED
@@ -0,0 +1,28 @@
// Save and load model
export function saveModel(model, pretty = false) {
  const data = {
    layers: model.layers.map(l => ({
      name: l.name,
      weights: l.weights,
      bias: l.bias,
      activation: l.activation?.name || null
    }))
  };
  return pretty ? JSON.stringify(data, null, 2) : JSON.stringify(data);
}

export function loadModel(json, ModelClass, LayerClass, actMap = {}) {
  const data = typeof json === 'string' ? JSON.parse(json) : json;
  const model = new ModelClass();

  data.layers.forEach(ld => {
    const l = new LayerClass(0, 0);
    l.weights = ld.weights;
    l.bias = ld.bias;
    l.name = ld.name;
    if (ld.activation && actMap[ld.activation]) l.activation = actMap[ld.activation];
    model.add(l);
  });

  return model;
}
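A hedged round-trip sketch: `saveModel` serializes each activation by its function name, and the `actMap` argument lets `loadModel` map those names back to functions:

```javascript
import { Seq } from './models/Seq.js';
import { Dense } from './layers/dense.js';
import * as act from './act/linear.js';
import { saveModel, loadModel } from './utils/io.js';

const model = new Seq();
model.add(new Dense(3, 5, act.leakyRelu));

const json = saveModel(model, true); // pretty-printed JSON string

// Map serialized activation names back to functions.
const restored = loadModel(json, Seq, Dense, { leakyRelu: act.leakyRelu });
restored.summary();
```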
package/utils/math.js
ADDED
@@ -0,0 +1,18 @@
// Basic math utils
export function dot(weights, input) {
  return weights.map(row =>
    row.reduce((sum, val, i) => sum + val * input[i], 0)
  );
}

export function add(a, b) {
  return a.map((v, i) => v + b[i]);
}

export function dotBatch(weights, inputs) {
  return inputs.map(input => dot(weights, input));
}

export function addScalar(arr, scalar) {
  return arr.map(v => v + scalar);
}
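`dot` is a matrix-vector product (one reduction per weight row); a quick worked check of the four helpers:

```javascript
import { dot, add, dotBatch, addScalar } from './utils/math.js';

const W = [[1, 2], [3, 4]];

console.log(dot(W, [5, 6]));                // [1*5 + 2*6, 3*5 + 4*6] => [17, 39]
console.log(add([17, 39], [1, 1]));         // [18, 40]
console.log(dotBatch(W, [[5, 6], [0, 1]])); // [[17, 39], [2, 4]]
console.log(addScalar([1, 2], 0.5));        // [1.5, 2.5]
```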
package/utils/tensor.js
ADDED
@@ -0,0 +1,13 @@
// Tensor helpers
export function zeros(size) {
  return Array(size).fill(0);
}

export function rand(size, scale = 0.1) {
  return Array(size).fill(0).map(() => Math.random() * scale);
}

export function shape(tensor) {
  if (Array.isArray(tensor[0])) return [tensor.length, tensor[0].length];
  return [tensor.length];
}
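A brief check of the helpers (`rand` output varies, so only its shape is shown):

```javascript
import { zeros, rand, shape } from './utils/tensor.js';

console.log(zeros(3));               // [0, 0, 0]
console.log(shape([[1, 2], [3, 4]])); // [2, 2]
console.log(shape(rand(4)));         // [4]
```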