mini-jstorch 1.1.0 → 1.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
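The same diff can be regenerated locally with npm's built-in differ (available since npm 7): `npm diff --diff=mini-jstorch@1.1.0 --diff=mini-jstorch@1.1.6`.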
@@ -0,0 +1,6 @@
+ **File Unavailable**
+ This file has been restricted by the developer.
+
+ It is intended solely for experimental purposes in future versions, including optimizations and bug fixes before full integration into the Main Engine.
+
+ In upcoming updates this file may contain critical system components. Using it now may result in errors or malfunctions, and it is not supported at this time.
@@ -0,0 +1,148 @@
+ // ================================
+ // MINI JS AI ENGINE v1
+ // ================================
+
+ // Basic tensor ops
+ const zeros = n => Array(n).fill(0);
+ const randn = () => (Math.random() * 2 - 1) * 0.1;
+
+ // Matrix helpers: dot is a matrix multiply, the rest are element-wise
+ const dot = (a, b) => a.map(row => b[0].map((_, j) => row.reduce((sum, v, k) => sum + v * b[k][j], 0)));
+ const add = (a, b) => a.map((row, i) => row.map((v, j) => v + b[i][j]));
+ const sub = (a, b) => a.map((row, i) => row.map((v, j) => v - b[i][j]));
+ const mulScalar = (a, s) => a.map(row => row.map(v => v * s));
+ const transpose = m => m[0].map((_, i) => m.map(row => row[i]));
+
+ // Activations and their derivatives
+ const Activations = {
+   relu: x => x.map(v => Math.max(0, v)),
+   linear: x => x,
+   leakyRelu: (x, alpha = 0.01) => x.map(v => v > 0 ? v : alpha * v)
+ };
+ const dActivations = {
+   relu: x => x.map(v => v > 0 ? 1 : 0),
+   linear: x => x.map(_ => 1),
+   leakyRelu: (x, alpha = 0.01) => x.map(v => v > 0 ? 1 : alpha)
+ };
+
+ // Dense layer with manually derived gradients
+ class Dense {
+   constructor(inputSize, outputSize, activation = 'linear') {
+     this.inputSize = inputSize;
+     this.outputSize = outputSize;
+     this.activation = activation;
+     this.W = Array.from({length: inputSize}, () => Array.from({length: outputSize}, () => randn() * Math.sqrt(2 / inputSize)));
+     this.b = Array(outputSize).fill(0);
+
+     // Adam state (first/second moment estimates)
+     this.mW = mulScalar(this.W, 0);
+     this.vW = mulScalar(this.W, 0);
+     this.mb = Array(outputSize).fill(0);
+     this.vb = Array(outputSize).fill(0);
+     this.lastInput = null;
+     this.lastOutput = null;
+   }
+
+   forward(X) {
+     this.lastInput = X;
+     let output = dot(X, this.W);
+     output = output.map((row, i) => row.map((v, j) => v + this.b[j]));
+     this.lastOutput = output.map(row => Activations[this.activation](row));
+     return this.lastOutput;
+   }
+
+   backward(dLoss, lr = 0.001, beta1 = 0.9, beta2 = 0.999, t = 1) {
+     const batch = this.lastInput.length;
+     // Gradient through the activation, flattened to batch * outputSize
+     const flatOut = this.lastOutput.flat();
+     const actGrad = dActivations[this.activation](flatOut);
+     const dOut = dLoss.flat().map((v, i) => v * actGrad[i]);
+
+     const gradW = Array.from({length: this.inputSize}, () => Array(this.outputSize).fill(0));
+     const gradB = Array(this.outputSize).fill(0);
+
+     // Accumulate gradients, indexing dOut by sample k and output unit j
+     for (let k = 0; k < batch; k++) {
+       for (let j = 0; j < this.outputSize; j++) {
+         const d = dOut[k * this.outputSize + j];
+         for (let i = 0; i < this.inputSize; i++) {
+           gradW[i][j] += this.lastInput[k][i] * d / batch;
+         }
+         gradB[j] += d / batch;
+       }
+     }
+
+     // Adam update with bias correction
+     for (let i = 0; i < this.inputSize; i++) {
+       for (let j = 0; j < this.outputSize; j++) {
+         this.mW[i][j] = beta1 * this.mW[i][j] + (1 - beta1) * gradW[i][j];
+         this.vW[i][j] = beta2 * this.vW[i][j] + (1 - beta2) * gradW[i][j] * gradW[i][j];
+         const mHat = this.mW[i][j] / (1 - Math.pow(beta1, t));
+         const vHat = this.vW[i][j] / (1 - Math.pow(beta2, t));
+         this.W[i][j] -= lr * mHat / (Math.sqrt(vHat) + 1e-8);
+       }
+     }
+
+     for (let j = 0; j < this.outputSize; j++) {
+       this.mb[j] = beta1 * this.mb[j] + (1 - beta1) * gradB[j];
+       this.vb[j] = beta2 * this.vb[j] + (1 - beta2) * gradB[j] * gradB[j];
+       const mHat = this.mb[j] / (1 - Math.pow(beta1, t));
+       const vHat = this.vb[j] / (1 - Math.pow(beta2, t));
+       this.b[j] -= lr * mHat / (Math.sqrt(vHat) + 1e-8);
+     }
+
+     // Propagate the loss gradient to the previous layer
+     const dNext = Array.from({length: batch}, () => Array(this.inputSize).fill(0));
+     for (let k = 0; k < batch; k++) {
+       for (let j = 0; j < this.outputSize; j++) {
+         for (let i = 0; i < this.inputSize; i++) {
+           dNext[k][i] += dOut[k * this.outputSize + j] * this.W[i][j];
+         }
+       }
+     }
+     return dNext;
+   }
+ }
+
+ // Sequential model
+ class Seq {
+   constructor(layers) {
+     this.layers = layers;
+     this.logs = [];
+   }
+
+   forward(X) {
+     return this.layers.reduce((inp, layer) => layer.forward(inp), X);
+   }
+
+   train(X, y, epochs = 200, lr = 0.001) {
+     for (let epoch = 1; epoch <= epochs; epoch++) {
+       const yPred = this.forward(X);
+       // MSE gradient: d(loss)/d(pred) = 2 * (pred - target) / n
+       const dLoss = yPred.map((row, i) => row.map((v, j) => 2 * (v - y[i][j]) / y.length));
+
+       let grad = dLoss;
+       for (let l = this.layers.length - 1; l >= 0; l--) {
+         grad = this.layers[l].backward(grad, lr, 0.9, 0.999, epoch);
+       }
+
+       const loss = yPred.map((row, i) => row.map((v, j) => Math.pow(v - y[i][j], 2))).flat().reduce((a, b) => a + b, 0) / y.length;
+       if (epoch % 50 === 0) console.log(`Epoch ${epoch}, Loss ${loss.toFixed(4)}, Pred sample ${yPred[0][0].toFixed(2)}`);
+     }
+   }
+
+   predict(X) {
+     return this.forward(X);
+   }
+ }
+
+ // ================================
+ // EXAMPLE USAGE
+ // ================================
+
+ const model = new Seq([
+   new Dense(2, 16, 'relu'),
+   new Dense(16, 12, 'relu'),
+   new Dense(12, 1, 'linear')
+ ]);
+
+ const X = [[1, 2], [2, 3], [3, 4]];
+ const y = [[3], [5], [7]];
+
+ model.train(X, y, 200, 0.01);
+
+ console.log('Prediction [7,8]:', model.predict([[7,8]]));
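The Adam update buried in `Dense.backward`'s nested loops is the standard bias-corrected rule applied per parameter. A minimal standalone sketch of that rule (the `adamStep` helper is illustrative, not part of the package):

```javascript
// Bias-corrected Adam step for a single scalar parameter,
// mirroring the per-element update in Dense.backward above.
function adamStep(p, m, v, grad, lr = 0.001, beta1 = 0.9, beta2 = 0.999, t = 1) {
  m = beta1 * m + (1 - beta1) * grad;          // first-moment (mean) estimate
  v = beta2 * v + (1 - beta2) * grad * grad;   // second-moment (uncentered variance) estimate
  const mHat = m / (1 - Math.pow(beta1, t));   // bias correction, significant for small t
  const vHat = v / (1 - Math.pow(beta2, t));
  return { p: p - lr * mHat / (Math.sqrt(vHat) + 1e-8), m, v };
}

// First step from zero-initialized moments: the bias correction makes the
// update magnitude ~lr regardless of the gradient's scale.
console.log(adamStep(1.0, 0, 0, 0.5, 0.01)); // -> p ≈ 0.99
```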
@@ -0,0 +1,8 @@
+ --start[line]--
+ e=run=[cpu[core]]
+ e.cg()
+ e.register('vanilla')
+ l=e.load('asm')
+ r=l.gv=[0xCAFEBABE]
+ e.load(register,r,'vanilla')
+ --end[line]--
package/index.js CHANGED
@@ -1,17 +1,6 @@
- // index.js
- // Main entry point for mini-jstorch
-
- // Core model
+ // Entry point of the library: exports the main classes and functions
  export { Seq } from './models/seq.js';
-
- // Layers
  export { Dense } from './layers/dense.js';
-
- // Activations
  export * as act from './act/linear.js';
-
- // Optimizers
  export { SGD } from './optim/sgd.js';
-
- // Training loop
  export { train } from './train/loop.js';
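Since `index.js` re-exports everything, consumers import from the package root. A minimal sketch against the 1.1.0 API (note that the deletions below remove the underlying modules, so these imports may no longer resolve in 1.1.6):

```javascript
// Consumer-side sketch; assumes `npm install mini-jstorch` and the
// export surface declared in index.js above.
import { Seq, Dense, act, SGD, train } from 'mini-jstorch';

const model = new Seq();
model.add(new Dense(3, 5, act.leakyRelu));
model.add(new Dense(5, 1));
train(model, [[1, 2, 3]], [[1]], (pred, y) => pred[0] - y[0], new SGD(0.01), 10);
```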
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "mini-jstorch",
- "version": "1.1.0",
+ "version": "1.1.6",
  "type": "module",
  "description": "A lightweight JavaScript neural network framework for browser & Node.js, inspired by PyTorch.",
  "main": "index.js",
package/README.md DELETED
@@ -1,73 +0,0 @@
- # mini-jstorch README
-
- ## Overview
- The *mini-jstorch* module is a lightweight, **PyTorch-inspired** neural network library in JavaScript. It provides the basic components to build, train, and optimize neural networks without heavy dependencies.
-
- ## Features
- - Sequential model container (`Seq`) for easy stacking of layers.
- - Common layers: `Dense`, `Dropout`, `Flatten`.
- - Activations: `relu`, `sigmoid`, `leakyRelu`, `tanh`, `linear`.
- - Tensor utilities (`zeros`, `rand`, `shape`) and math helpers (`dot`, `add`).
- - Custom SGD optimizer.
- - Training loop with batch and epoch support.
- - Save/load models with JSON.
-
- ## Directory Structure
- ```
- module-root/           # Placeholder, not the actual module root name
-
- ├── index.js           # Main exports
- ├── models/            # Model containers
- │   └── Seq.js         # Sequential model
- ├── layers/            # Neural network layers
- │   ├── dense.js       # Fully connected layer
- │   ├── dropout.js     # Dropout layer
- │   └── flatten.js     # Flatten input layer
- ├── act/               # Activation functions
- │   ├── linear.js      # Additional activations (leakyRelu, tanh)
- │   └── relu.js        # ReLU, Sigmoid
- ├── utils/             # Utilities
- │   ├── io.js          # Save/load models
- │   ├── math.js        # Math helpers (dot, add, etc.)
- │   └── tensor.js      # Tensor helpers
- ├── train/             # Training loop
- │   └── loop.js
- └── optim/             # Optimizers
-     └── sgd.js         # Custom SGD optimizer
- ```
-
- ## Patch Notes - Version 1.1.0
- - Refactored activation system into `act/` folder for better separation.
- - Added `linear.js` activations (leakyRelu, tanh).
- - Updated `Dense` layer with `forwardBatch` and `forwardLeaky` support.
- - Added `Flatten` and `Dropout` layers.
- - Implemented batch prediction support in `Seq` model.
- - Added basic SGD optimizer in `optim/`.
- - Introduced training loop in `train/loop.js`.
- - Expanded utils: tensor creation (`zeros`, `rand`), math operations (`dotBatch`, `addScalar`).
- - Enhanced model IO with activations mapping and optional pretty-print.
- - Added comments throughout files for clarity and better documentation.
-
- ## Example Usage
- ```javascript
- import { Seq } from './models/Seq.js';
- import { Dense } from './layers/dense.js';
- import * as act from './act/linear.js';
- import { SGD } from './optim/sgd.js';
- import { train } from './train/loop.js';
-
- const model = new Seq();
- model.add(new Dense(3, 5, act.leakyRelu));
- model.add(new Dense(5, 1));
-
- const optimizer = new SGD(0.01);
-
- train(model, [[1,2,3]], [[1]], (pred, y) => pred[0]-y[0], optimizer, 10);
- model.summary();
- ```
-
- ## Notes
- - Layer and function names are kept short and consistent for clarity.
- - Batch methods are available in layers and the model for better training performance.
- - Designed to be an educational and extensible PyTorch-like system in pure JavaScript.
package/act/linear.js DELETED
@@ -1,8 +0,0 @@
- // Additional activations
- export function leakyRelu(x, alpha = 0.01) {
-   return x.map(v => (v > 0 ? v : alpha * v));
- }
-
- export function tanh(x) {
-   return x.map(v => Math.tanh(v));
- }
package/act/relu.js DELETED
@@ -1,10 +0,0 @@
- // Activation functions
- export function relu(x) {
-   // Rectified Linear Unit
-   return x.map(v => Math.max(0, v));
- }
-
- export function sigmoid(x) {
-   // Sigmoid activation
-   return x.map(v => 1 / (1 + Math.exp(-v)));
- }
package/example.js DELETED
@@ -1,23 +0,0 @@
- // example.js
- import { Seq, Dense, act, SGD, train } from './index.js';
-
- // Build the model
- const model = new Seq();
- model.add(new Dense(3, 5, act.leakyRelu));
- model.add(new Dense(5, 1));
-
- // Create the optimizer
- const optimizer = new SGD(0.01);
-
- // Sample training data
- const x = [[1, 2, 3]];
- const y = [[1]];
-
- // Simple loss function
- const lossFn = (pred, target) => pred[0] - target[0];
-
- // Train the model
- train(model, x, y, lossFn, optimizer, 10);
-
- // Print the summary
- model.summary();
package/layers/dense.js DELETED
@@ -1,28 +0,0 @@
- import { dot, add } from '../utils/math.js';
- import { leakyRelu } from '../act/linear.js';
-
- // Fully connected layer
- export class Dense {
-   constructor(inputSize, outputSize, activation = x => x) {
-     this.weights = Array.from({ length: outputSize }, () =>
-       Array.from({ length: inputSize }, () => Math.random() * 0.1)
-     );
-     this.bias = Array(outputSize).fill(0);
-     this.activation = activation;
-     this.name = "Dense";
-   }
-
-   forward(input) {
-     const z = add(dot(this.weights, input), this.bias);
-     return z.map(this.activation);
-   }
-
-   forwardBatch(inputs) {
-     return inputs.map(input => this.forward(input));
-   }
-
-   forwardLeaky(input) {
-     const z = add(dot(this.weights, input), this.bias);
-     return leakyRelu(z);
-   }
- }
package/layers/dropout.js DELETED
@@ -1,15 +0,0 @@
- // Dropout layer for regularization
- export class Dropout {
-   constructor(p = 0.5) {
-     this.p = p;
-     this.name = "Dropout";
-   }
-
-   forward(input) {
-     return input.map(v => (Math.random() < this.p ? 0 : v / (1 - this.p)));
-   }
-
-   forwardBatch(inputs) {
-     return inputs.map(inp => this.forward(inp));
-   }
- }
package/layers/flatten.js DELETED
@@ -1,14 +0,0 @@
- // Flatten input for Dense layers
- export class Flatten {
-   constructor() {
-     this.name = "Flatten";
-   }
-
-   forward(input) {
-     return input.flat();
-   }
-
-   forwardBatch(inputs) {
-     return inputs.map(inp => inp.flat());
-   }
- }
package/models/Seq.js DELETED
@@ -1,25 +0,0 @@
- // Sequential model container
- export class Seq {
-   constructor() {
-     this.layers = [];
-   }
-
-   add(layer) {
-     this.layers.push(layer);
-   }
-
-   predict(input) {
-     return this.layers.reduce((out, layer) => layer.forward(out), input);
-   }
-
-   predictBatch(inputs) {
-     return inputs.map(input => this.predict(input));
-   }
-
-   summary() {
-     console.log("Model summary:");
-     this.layers.forEach((l, i) => {
-       console.log(`  Layer ${i+1}: ${l.name}, outputSize: ${l.weights ? l.weights.length : 'N/A'}`);
-     });
-   }
- }
package/optim/sgd.js DELETED
@@ -1,12 +0,0 @@
- // Custom SGD optimizer
- export class SGD {
-   constructor(lr = 0.01) {
-     this.lr = lr;
-   }
-
-   step(params, grads) {
-     for (let i = 0; i < params.length; i++) {
-       params[i] = params[i].map((v, j) => v - this.lr * grads[i][j]);
-     }
-   }
- }
package/train/loop.js DELETED
@@ -1,28 +0,0 @@
- // Training loop
- export function train(model, X, Y, lossFn, optimizer, epochs = 10) {
-   for (let e = 0; e < epochs; e++) {
-     let totalLoss = 0;
-
-     for (let i = 0; i < X.length; i++) {
-       const x = X[i];
-       const y = Y[i];
-
-       // Forward pass
-       const pred = model.predict(x);
-
-       // Compute loss
-       const loss = lossFn(pred, y);
-       totalLoss += loss;
-
-       // Naive gradient (placeholder)
-       const grads = pred.map((p, j) => [p - y[j]]);
-
-       // Update first Dense layer as an example
-       if (model.layers[0].weights) {
-         optimizer.step([model.layers[0].weights], grads);
-       }
-     }
-
-     console.log(`Epoch ${e+1}/${epochs}, Loss: ${totalLoss/X.length}`);
-   }
- }
package/utils/io.js DELETED
@@ -1,28 +0,0 @@
- // Save and load model
- export function saveModel(model, pretty = false) {
-   const data = {
-     layers: model.layers.map(l => ({
-       name: l.name,
-       weights: l.weights,
-       bias: l.bias,
-       activation: l.activation?.name || null
-     }))
-   };
-   return pretty ? JSON.stringify(data, null, 2) : JSON.stringify(data);
- }
-
- export function loadModel(json, ModelClass, LayerClass, actMap = {}) {
-   const data = typeof json === 'string' ? JSON.parse(json) : json;
-   const model = new ModelClass();
-
-   data.layers.forEach(ld => {
-     const l = new LayerClass(0, 0);
-     l.weights = ld.weights;
-     l.bias = ld.bias;
-     l.name = ld.name;
-     if (ld.activation && actMap[ld.activation]) l.activation = actMap[ld.activation];
-     model.add(l);
-   });
-
-   return model;
- }
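The deleted IO helpers round-trip a model through JSON, re-binding activations by function name via `actMap`. A minimal sketch against the 1.1.0 file layout (paths and the `actMap` entry are taken from the deleted files above):

```javascript
// Round-trip sketch for the deleted 1.1.0 IO helpers.
import { saveModel, loadModel } from './utils/io.js';
import { Seq } from './models/Seq.js';
import { Dense } from './layers/dense.js';
import { leakyRelu } from './act/linear.js';

const model = new Seq();
model.add(new Dense(3, 2, leakyRelu));

const json = saveModel(model, true);                      // pretty-printed JSON
const copy = loadModel(json, Seq, Dense, { leakyRelu });  // rebuilds layers, restores weights/bias
```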
package/utils/math.js DELETED
@@ -1,18 +0,0 @@
- // Basic math utils
- export function dot(weights, input) {
-   return weights.map(row =>
-     row.reduce((sum, val, i) => sum + val * input[i], 0)
-   );
- }
-
- export function add(a, b) {
-   return a.map((v, i) => v + b[i]);
- }
-
- export function dotBatch(weights, inputs) {
-   return inputs.map(input => dot(weights, input));
- }
-
- export function addScalar(arr, scalar) {
-   return arr.map(v => v + scalar);
- }
package/utils/tensor.js DELETED
@@ -1,13 +0,0 @@
- // Tensor helpers
- export function zeros(size) {
-   return Array(size).fill(0);
- }
-
- export function rand(size, scale = 0.1) {
-   return Array(size).fill(0).map(() => Math.random() * scale);
- }
-
- export function shape(tensor) {
-   if (Array.isArray(tensor[0])) return [tensor.length, tensor[0].length];
-   return [tensor.length];
- }