mini-jstorch 1.0.2 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,24 +1,76 @@
1
- # mini-jstorch
2
- *NOTICE!*:
3
- This versions is still on a **BETA** Versions of **mini-jstorch**!
4
- So maybe the features is not many and completely *SO* BE PATIENCE!!
1
+ # mini-jstorch README
5
2
 
6
- ---
3
+ ## !IMPORTANT! ##
4
+ **Some internal systems in mini-jstorch currently have a bug. Please try the module again later; we expect to fix this within 20 minutes to 1 hour.**
7
5
 
8
- **Version: 1.0.2**
9
- A lightweight and browser-compatible deep learning framework inspired by **PyTorch** written in pure JavaScript.
10
- Perfect for CodePen, web demos, and fast prototyping.
11
-
12
- ---
6
+ ## Overview
7
+ *mini-jstorch BETA* module is a lightweight, **PyTorch-inspired** neural network library in JavaScript. It provides basic components to build, train, and optimize neural networks
8
+ without being heavyweight.
13
9
 
14
10
  ## Features
11
+ - Sequential model container (`Seq`) for easy stacking of layers.
12
+ - Common layers: `Dense`, `Dropout`, `Flatten`.
13
+ - Activations: `relu`, `sigmoid`, `leakyRelu`, `tanh`, `linear`.
14
+ - Tensor utilities (`zeros`, `rand`, `shape`) and math helpers (`dot`, `add`).
15
+ - Custom SGD optimizer.
16
+ - Training loop with batch and epoch support.
17
+ - Save/load models with JSON.
18
+
19
+ ## Directory Structure
20
+ ```
21
+ module-root/ # Placeholder only, not a real module root name
22
+
23
+ ├── index.js # Main exports
24
+ ├── models/ # Model containers
25
+ │ └── Seq.js # Sequential model
26
+ ├── layers/ # Neural network layers
27
+ │ ├── dense.js # Fully connected layer
28
+ │ ├── dropout.js # Dropout layer
29
+ │ └── flatten.js # Flatten input layer
30
+ ├── act/ # Activation functions
31
+ │ ├── linear.js # Additional activations (leakyRelu, tanh)
32
+ │ └── relu.js # ReLU, Sigmoid
33
+ ├── utils/ # Utilities
34
+ │ ├── io.js # Save/load models
35
+ │ ├── math.js # Math helpers (dot, add, etc.)
36
+ │ └── tensor.js # Tensor helpers
37
+ ├── train/ # Training loop
38
+ │ └── loop.js
39
+ └── optim/ # Optimizers
40
+ └── sgd.js # Custom SGD optimizer
41
+ ```
42
+
43
+ ## Patch Notes - Version 1.1.0
44
+ - Refactored activation system into `act/` folder for better separation.
45
+ - Added `linear.js` activations (leakyRelu, tanh).
46
+ - Updated `Dense` layer with `forwardBatch` and `forwardLeaky` support.
47
+ - Added `Flatten` and `Dropout` layers.
48
+ - Implemented batch prediction support in `Seq` model.
49
+ - Added basic SGD optimizer in `optim/`.
50
+ - Introduced training loop in `train/loop.js`.
51
+ - Expanded utils: tensor creation (`zeros`, `rand`), math operations (`dotBatch`, `addScalar`).
52
+ - Enhanced model IO with activations mapping and optional pretty-print.
53
+ - Added comments throughout files for better clarity and professional documentation.
54
+
55
+ ## Example Usage
56
+ ```javascript
57
+ import { Seq } from './models/Seq.js';
58
+ import { Dense } from './layers/dense.js';
59
+ import * as act from './act/linear.js';
60
+ import { SGD } from './optim/sgd.js';
61
+ import { train } from './train/loop.js';
62
+
63
+ const model = new Seq();
64
+ model.add(new Dense(3, 5, act.leakyRelu));
65
+ model.add(new Dense(5, 1));
66
+
67
+ const optimizer = new SGD(0.01);
15
68
 
16
- - # `Sequential` model API
17
- - # Fully modular layers (e.g., `Dense`)
18
- - # Activation functions: `ReLU`, `Sigmoid`
19
- - # Model serialization (`saveModel`, `loadModel`)
20
- - # Works on both **Node.js** and **browser**
21
- - # No native bindings, 100% JavaScript
22
- - # Inspired by PyTorch, but mini-sized
69
+ train(model, [[1,2,3]], [[1]], (pred, y) => pred[0]-y[0], optimizer, 10);
70
+ model.summary();
71
+ ```
23
72
 
24
- ---
73
+ ## Notes
74
+ - Layer and function names are kept short and professional for clarity.
75
+ - Batch methods available in layers and model for better training performance.
76
+ - Designed to be an educational and extensible PyTorch-like system in pure JavaScript.
package/act/linear.js ADDED
@@ -0,0 +1,8 @@
1
// Additional activations

/**
 * Leaky ReLU: identity for positive values, a small slope `alpha`
 * for negative values (keeps a nonzero gradient below zero).
 * @param {number[]} x - Input vector.
 * @param {number} [alpha=0.01] - Slope applied to negative values.
 * @returns {number[]} Activated vector.
 */
export function leakyRelu(x, alpha = 0.01) {
  return x.map((value) => (value > 0 ? value : alpha * value));
}

/**
 * Element-wise hyperbolic tangent.
 * @param {number[]} x - Input vector.
 * @returns {number[]} tanh of each element.
 */
export function tanh(x) {
  return x.map((value) => Math.tanh(value));
}
@@ -1,7 +1,10 @@
1
// Activation functions

/**
 * Rectified Linear Unit: clamps negative values to zero, element-wise.
 * @param {number[]} x - Input vector.
 * @returns {number[]} Activated vector.
 */
export function relu(x) {
  const out = [];
  for (const value of x) {
    out.push(Math.max(0, value));
  }
  return out;
}

/**
 * Logistic sigmoid, element-wise: 1 / (1 + e^-v), mapping into (0, 1).
 * @param {number[]} x - Input vector.
 * @returns {number[]} Activated vector.
 */
export function sigmoid(x) {
  const out = [];
  for (const value of x) {
    out.push(1 / (1 + Math.exp(-value)));
  }
  return out;
}
package/index.js CHANGED
@@ -1,5 +1,17 @@
1
1
  // index.js
2
- export { Sequential } from './models/Sequential.js';
3
- export { Dense } from './layers/Dense.js';
4
- export * as activations from './activations/relu.js';
5
- export * as io from './utils/io.js';
2
+ // Main entry point for mini-jstorch
3
+
4
+ // Core model
5
+ export { Seq } from './models/seq.js';
6
+
7
+ // Layers
8
+ export { Dense } from './layers/dense.js';
9
+
10
+ // Activations
11
+ export * as act from './act/linear.js';
12
+
13
+ // Optimizers
14
+ export { SGD } from './optim/sgd.js';
15
+
16
+ // Training loop
17
+ export { train } from './train/loop.js';
@@ -1,5 +1,7 @@
1
1
  import { dot, add } from '../utils/math.js';
2
+ import { leakyRelu } from '../act/linear.js';
2
3
 
4
+ // Fully connected layer
3
5
  export class Dense {
4
6
  constructor(inputSize, outputSize, activation = x => x) {
5
7
  this.weights = Array.from({ length: outputSize }, () =>
@@ -14,4 +16,13 @@ export class Dense {
14
16
  const z = add(dot(this.weights, input), this.bias);
15
17
  return z.map(this.activation);
16
18
  }
19
+
20
+ forwardBatch(inputs) {
21
+ return inputs.map(input => this.forward(input));
22
+ }
23
+
24
+ forwardLeaky(input) {
25
+ const z = add(dot(this.weights, input), this.bias);
26
+ return leakyRelu(z);
27
+ }
17
28
  }
@@ -0,0 +1,15 @@
1
// Dropout layer for regularization.
// Uses "inverted dropout": each value is zeroed with probability p, and
// survivors are divided by (1 - p) so the expected magnitude is unchanged.
export class Dropout {
  /**
   * @param {number} [p=0.5] - Probability of dropping each unit.
   */
  constructor(p = 0.5) {
    this.p = p;
    this.name = "Dropout";
  }

  // Drop each value with probability p; rescale the survivors.
  forward(input) {
    return input.map((value) =>
      Math.random() < this.p ? 0 : value / (1 - this.p)
    );
  }

  // Apply forward() independently to every sample in a batch.
  forwardBatch(inputs) {
    return inputs.map((sample) => this.forward(sample));
  }
}
@@ -0,0 +1,14 @@
1
// Flatten layer: collapses one level of nesting (Array.prototype.flat
// default depth 1) so nested inputs can feed a Dense layer.
export class Flatten {
  constructor() {
    this.name = "Flatten";
  }

  // Flatten a single sample by one level of nesting.
  forward(input) {
    return input.flat();
  }

  // Flatten each sample of a batch independently.
  forwardBatch(inputs) {
    return inputs.map((sample) => sample.flat());
  }
}
package/models/Seq.js ADDED
@@ -0,0 +1,25 @@
1
// Sequential model container: layers are applied in the order they were added.
export class Seq {
  constructor() {
    // Ordered pipeline of layer objects, each exposing forward(input).
    this.layers = [];
  }

  // Append a layer to the end of the pipeline.
  add(layer) {
    this.layers.push(layer);
  }

  // Run a single sample through every layer in sequence.
  predict(input) {
    let activation = input;
    for (const layer of this.layers) {
      activation = layer.forward(activation);
    }
    return activation;
  }

  // Run each sample of a batch through the full pipeline.
  predictBatch(inputs) {
    return inputs.map((sample) => this.predict(sample));
  }

  // Log a one-line description of every layer to the console.
  summary() {
    console.log("Model summary:");
    this.layers.forEach((layer, index) => {
      const size = layer.weights ? layer.weights.length : 'N/A';
      console.log(` Layer ${index + 1}: ${layer.name}, outputSize: ${size}`);
    });
  }
}
package/optim/sgd.js ADDED
@@ -0,0 +1,12 @@
1
// Stochastic gradient descent optimizer with a fixed learning rate.
export class SGD {
  /**
   * @param {number} [lr=0.01] - Learning rate (step size per update).
   */
  constructor(lr = 0.01) {
    this.lr = lr;
  }

  /**
   * Update parameters in place: params[i][j] -= lr * grads[i][j].
   * Each params[i] is replaced with a freshly built array.
   * @param {number[][]} params - Parameter rows to update.
   * @param {number[][]} grads - Gradient rows, aligned with params.
   */
  step(params, grads) {
    params.forEach((row, i) => {
      params[i] = row.map((value, j) => value - this.lr * grads[i][j]);
    });
  }
}
package/package.json CHANGED
@@ -1,6 +1,7 @@
1
1
  {
2
2
  "name": "mini-jstorch",
3
- "version": "1.0.2",
3
+ "version": "1.1.1",
4
+ "type": "module",
4
5
  "description": "A lightweight JavaScript neural network framework for browser & Node.js, inspired by PyTorch.",
5
6
  "main": "index.js",
6
7
  "keywords": [
package/train/loop.js ADDED
@@ -0,0 +1,28 @@
1
// Training loop
/**
 * Train a model with simple per-sample updates over full dataset passes.
 *
 * @param {object} model - Model exposing predict(x) and a layers array.
 * @param {Array[]} X - Input samples.
 * @param {Array[]} Y - Target vectors, aligned with X.
 * @param {Function} lossFn - (pred, y) => numeric loss for one sample.
 * @param {object} optimizer - Optimizer exposing step(params, grads).
 * @param {number} [epochs=1] - Passes over the dataset.
 *   BUG FIX: the default was `None`, which is Python syntax — in JavaScript
 *   `None` is an undefined identifier, so omitting `epochs` threw a
 *   ReferenceError. A numeric default of 1 restores usability.
 */
export function train(model, X, Y, lossFn, optimizer, epochs = 1) {
  for (let e = 0; e < epochs; e++) {
    let totalLoss = 0;

    for (let i = 0; i < X.length; i++) {
      const x = X[i];
      const y = Y[i];

      // Forward pass
      const pred = model.predict(x);

      // Compute loss
      totalLoss += lossFn(pred, y);

      // Naive gradient (placeholder) — one (pred - target) row per output;
      // NOTE(review): this is not true backprop through the layers.
      const grads = pred.map((p, j) => [p - y[j]]);

      // Update the first Dense layer only, as an example.
      // `?.` guards against an empty model (layers[0] undefined).
      if (model.layers[0]?.weights) {
        optimizer.step([model.layers[0].weights], grads);
      }
    }

    console.log(`Epoch ${e + 1}/${epochs}, Loss: ${totalLoss / X.length}`);
  }
}
package/utils/io.js CHANGED
@@ -1,24 +1,27 @@
1
// Save and load model
/**
 * Serialize a model's layers (name, weights, bias, activation name) to JSON.
 * @param {object} model - Model exposing a layers array.
 * @param {boolean} [pretty=false] - When true, pretty-print with 2-space indent.
 * @returns {string} JSON string representation of the model.
 */
export function saveModel(model, pretty = false) {
  const data = {
    layers: model.layers.map((layer) => ({
      name: layer.name,
      weights: layer.weights,
      bias: layer.bias,
      // Anonymous functions have name "" and fall back to null.
      activation: layer.activation?.name || null,
    })),
  };
  const indent = pretty ? 2 : undefined;
  return JSON.stringify(data, null, indent);
}
11
13
 
12
- export function loadModel(json, ModelClass, LayerClass) {
14
+ export function loadModel(json, ModelClass, LayerClass, actMap={}) {
13
15
  const data = typeof json === 'string' ? JSON.parse(json) : json;
14
16
  const model = new ModelClass();
15
17
 
16
- data.layers.forEach(layer => {
17
- const dense = new LayerClass(0, 0); // dummy init
18
- dense.weights = layer.weights;
19
- dense.bias = layer.bias;
20
- dense.name = layer.name;
21
- model.add(dense);
18
+ data.layers.forEach(ld => {
19
+ const l = new LayerClass(0,0);
20
+ l.weights = ld.weights;
21
+ l.bias = ld.bias;
22
+ l.name = ld.name;
23
+ if(ld.activation && actMap[ld.activation]) l.activation = actMap[ld.activation];
24
+ model.add(l);
22
25
  });
23
26
 
24
27
  return model;
package/utils/math.js CHANGED
@@ -1,3 +1,4 @@
1
// Basic math utils

/**
 * Matrix-vector product: one dot product per weight row.
 * @param {number[][]} weights - Matrix as an array of rows.
 * @param {number[]} input - Vector; must match the row length.
 * @returns {number[]} One scalar per row.
 */
export function dot(weights, input) {
  return weights.map((row) =>
    row.reduce((sum, val, i) => sum + val * input[i], 0)
  );
}

/**
 * Element-wise vector addition; the length of `a` drives the result.
 * @param {number[]} a
 * @param {number[]} b
 * @returns {number[]}
 */
export function add(a, b) {
  return a.map((value, i) => value + b[i]);
}

/**
 * dot() applied to every input vector of a batch.
 * @param {number[][]} weights
 * @param {number[][]} inputs - Batch of vectors.
 * @returns {number[][]}
 */
export function dotBatch(weights, inputs) {
  return inputs.map((vector) => dot(weights, vector));
}

/**
 * Add a scalar to every element of an array.
 * @param {number[]} arr
 * @param {number} scalar
 * @returns {number[]}
 */
export function addScalar(arr, scalar) {
  return arr.map((value) => value + scalar);
}
@@ -0,0 +1,13 @@
1
// Tensor helpers

/**
 * Create a 1-D tensor of `size` zeros.
 * @param {number} size
 * @returns {number[]}
 */
export function zeros(size) {
  return new Array(size).fill(0);
}

/**
 * Create a 1-D tensor of uniform random values in [0, scale).
 * @param {number} size
 * @param {number} [scale=0.1]
 * @returns {number[]}
 */
export function rand(size, scale = 0.1) {
  return Array.from({ length: size }, () => Math.random() * scale);
}

/**
 * Shape of a 1-D or 2-D tensor: [rows, cols] for nested arrays, else [length].
 * @param {Array} tensor
 * @returns {number[]}
 */
export function shape(tensor) {
  return Array.isArray(tensor[0])
    ? [tensor.length, tensor[0].length]
    : [tensor.length];
}
@@ -1,20 +0,0 @@
1
- export class Sequential {
2
- constructor() {
3
- this.layers = [];
4
- }
5
-
6
- add(layer) {
7
- this.layers.push(layer);
8
- }
9
-
10
- predict(input) {
11
- return this.layers.reduce((out, layer) => layer.forward(out), input);
12
- }
13
-
14
- summary() {
15
- console.log("Sequential model:");
16
- this.layers.forEach((layer, i) => {
17
- console.log(` Layer ${i + 1}: ${layer.name || "unnamed"}`);
18
- });
19
- }
20
- }