mini-jstorch 1.3.2 → 1.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +46 -36
- package/index.js +2 -90
- package/package.json +1 -1
- package/src/MainEngine.js +44 -5
- package/tests/MakeModel.js +29 -561
- package/tests/scheduler.js +23 -0
- package/MODULE.md +0 -41
- package/hh.js +0 -38
- package/tests/DebugModel.js +0 -127
- package/tests/unit/newver.js +0 -103
- package/tests/unit/ogver.js +0 -87
package/README.md
CHANGED

@@ -1,39 +1,42 @@
  # Mini-JSTorch

- A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices
+ A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.

  ## Overview

- Mini-JSTorch is a
+ Mini-JSTorch is a high-performance, minimalist JavaScript library for building neural networks. It runs efficiently in both frontend and backend environments, including low-end devices. The library enables quick experimentation and learning in AI without compromising stability, accuracy, or training reliability.

- This release, **version 1.
+ This release, **version 1.4.3**, introduces **learning rate schedulers**, improved testing/demo templates, and other minor enhancements.

  ---

- ##
-
- -
- - **
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ ## Feature Highlights
+
+ - **Learning Rate Schedulers:** New `StepLR` and `LambdaLR` for dynamic optimizer learning rate adjustment.
+ - **Full Conv2D support:** Forward and backward operations for convolutional layers.
+ - **Tensor operations:** Broadcasting, reshaping, and reduction utilities.
+ - **Advanced Activations:** Includes `LeakyReLU`, `GELU`, `Mish`, `SiLU`, `ELU`, and more.
+ - **Optimizers:** `Adam` and `SGD` with gradient updates.
+ - **Dropout Layer:** For regularization during training.
+ - **BatchNorm2D:** For stable training in convolutional models.
+ - **Tensor Manipulation:** Utilities like `flatten`, `stack`, `concat`, `eye`, `reshape`.
+ - **Model Save & Load:** Easy persistence and restore of models.
+ - **Test/Demo Templates:** The `tests/` folder provides ready-to-run examples for model building and feature usage.
+ - **Performance Optimized:** Suitable for both frontend and backend usage.
+ - **Backward Compatibility:** Maintained for core layers and activations.

  ---

- ## Features
+ ## Core Features

- - **
- - **Activations
- - **Loss Functions
- - **Optimizers
- - **
- - **
+ - **Layers:** Linear, Conv2D
+ - **Activations:** ReLU, Sigmoid, Tanh, LeakyReLU, GELU, Mish, SiLU, ELU
+ - **Loss Functions:** MSELoss, CrossEntropyLoss
+ - **Optimizers:** Adam, SGD
+ - **Schedulers:** StepLR, LambdaLR
+ - **Regularization:** Dropout, BatchNorm2D
+ - **Utilities:** zeros, randomMatrix, softmax, crossEntropy, dot, addMatrices, reshape, stack, flatten, eye, concat
+ - **Model Container:** Sequential (for stacking layers with forward/backward passes)

  ---

@@ -49,7 +52,7 @@ npm install mini-jstorch
  ## Quick Start Example

  ```javascript
- import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam } from '
+ import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam, StepLR } from 'mini-jstorch';

  // Build model
  const model = new Sequential([

@@ -70,6 +73,7 @@ const Y = [
  // Loss & optimizer
  const lossFn = new CrossEntropyLoss();
  const optimizer = new Adam(model.parameters(), 0.1);
+ const scheduler = new StepLR(optimizer, 20, 0.5); // Halve LR every 20 epochs

  // Training loop
  for (let epoch = 1; epoch <= 100; epoch++) {

@@ -78,7 +82,8 @@ for (let epoch = 1; epoch <= 100; epoch++) {
    const gradLoss = lossFn.backward();
    model.backward(gradLoss);
    optimizer.step();
-
+   scheduler.step();
+   if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}, LR: ${optimizer.lr.toFixed(4)}`);
  }

  // Prediction

@@ -94,7 +99,7 @@ predTest.forEach((p,i) => {
  ## Save & Load Models

  ```javascript
- import { saveModel, loadModel } from '
+ import { saveModel, loadModel, Sequential } from './src/MainEngine.js';

  const json = saveModel(model);
  const model2 = new Sequential([...]); // same architecture

@@ -103,7 +108,22 @@ loadModel(model2, json);

  ---

+ ## Demos & Testing
+
+ Check the `tests/` directory for ready-to-run demos:
+ - **tests/MakeModel.js:** Build and run a simple neural network.
+ - **tests/scheduler.js:** Experiment with learning rate schedulers.
+ - Add your own scripts for quick prototyping!
+
+ ```bash
+ node tests/MakeModel.js
+ node tests/scheduler.js
+ ```
+
+ ---
+
  ## Intended Use Cases
+
  - Rapid prototyping of neural networks in frontend and backend.
  - Learning and teaching foundational neural network concepts.
  - Experimentation on low-end devices or mobile browsers.

@@ -114,13 +134,3 @@ loadModel(model2, json);
  # License

  **MIT © 2025 Rizal**
-
- ---
-
- ## Facts
-
- - **This module is implemented entirely in pure JavaScript.**
- - **The `Dummy` folder contains modules used for development, testing, and debugging before integration into the main engine.**
- - **files startup.cpu is actually an some random files lol.**
- - **This module was created by a `single` developer.**
- - **You can join to the `mjs-group` Organization on my profile!**
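A note on the new scheduler line in the Quick Start: with `new StepLR(optimizer, 20, 0.5)` and the initial Adam LR of 0.1, the learning rate is halved at every 20th epoch. A minimal sketch of the resulting schedule, using only classes added in this release (the single mock parameter is a hypothetical placeholder, just enough to construct the optimizer):

```javascript
import { Adam, StepLR } from "mini-jstorch";

const params = [{ param: [[0]], grad: [[0]] }]; // hypothetical placeholder parameter
const optimizer = new Adam(params, 0.1);
const scheduler = new StepLR(optimizer, 20, 0.5);

for (let epoch = 1; epoch <= 100; epoch++) {
  scheduler.step(); // multiplies optimizer.lr by 0.5 whenever epoch % 20 === 0
}
// After epochs 20/40/60/80/100 the LR is 0.05, 0.025, 0.0125, 0.00625, 0.003125.
```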
package/index.js
CHANGED

@@ -1,90 +1,2 @@
- //
-
- // =====================================================
-
- // Import all modules
- import { Tensor } from './src/tensor.js';
- import {
-   Linear, Dense, Conv2d, MaxPool2d, AvgPool2d, Flatten, Dropout,
-   BatchNorm2d, ReLU, Sigmoid, Tanh, LeakyReLU, ELU, Softmax, Sequential
- } from './src/layers.js';
- import { MSELoss, CrossEntropyLoss, BCELoss } from './src/loss.js';
- import { SGD, Adam, RMSprop } from './src/optimizers.js';
- import { Model, Trainer, models, optimizers, losses, layers, tensors, utils } from './src/model.js';
-
- // Export everything
- export {
-   // Core classes
-   Tensor,
-   Model,
-   Trainer,
-
-   // Layer classes
-   Linear,
-   Dense,
-   Conv2d,
-   MaxPool2d,
-   AvgPool2d,
-   Flatten,
-   Dropout,
-   BatchNorm2d,
-   ReLU,
-   Sigmoid,
-   Tanh,
-   LeakyReLU,
-   ELU,
-   Softmax,
-   Sequential,
-
-   // Loss classes
-   MSELoss,
-   CrossEntropyLoss,
-   BCELoss,
-
-   // Optimizer classes
-   SGD,
-   Adam,
-   RMSprop,
-
-   // Factory functions
-   models,
-   optimizers,
-   losses,
-   layers,
-   tensors,
-   utils
- };
-
- // Default export
- export default {
-   Tensor,
-   Model,
-   Trainer,
-   Linear,
-   Dense,
-   Conv2d,
-   MaxPool2d,
-   AvgPool2d,
-   Flatten,
-   Dropout,
-   BatchNorm2d,
-   ReLU,
-   Sigmoid,
-   Tanh,
-   LeakyReLU,
-   ELU,
-   Softmax,
-   Sequential,
-   MSELoss,
-   CrossEntropyLoss,
-   BCELoss,
-   SGD,
-   Adam,
-   RMSprop,
-   models,
-   optimizers,
-   losses,
-   layers,
-   tensors,
-   utils
- };
+ // package root
+ export * from "./src/MainEngine.js";
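The new entry point simply re-exports every named export of `MainEngine.js`, so root-level named imports keep working. One caveat worth noting (a reading of standard ES-module semantics, not something this diff states): `export *` does not forward a default export, so the 1.3.x default-import style would no longer resolve. A sketch, assuming the package root remains the published entry point:

```javascript
// Named imports resolve through the package root exactly as before:
import { Sequential, Linear, ReLU, Adam, StepLR, LambdaLR } from "mini-jstorch";

// No longer available after this change (export * forwards named exports only):
// import MiniJSTorch from "mini-jstorch";
```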
package/package.json
CHANGED
package/src/MainEngine.js
CHANGED

@@ -1,8 +1,7 @@

- // MINI
- //
- //
- // IMPORTANT: CORE ENGINE DO NOT EDIT THIS FILES VERY SENSITIVE IT WILL CRASHING YOUR ENGINE SYSTEMS!
+ // OFFICIAL MINI-JSTORCH ENGINE
+ // LICENSED UNDER MIT LICENSE
+ // MIT (C) Rizal 2025

  // ---------------------- Utilities ----------------------
  function zeros(rows, cols) { return Array.from({length:rows},()=>Array(cols).fill(0)); }

@@ -225,6 +224,46 @@ export class Adam{
    }
  }

+ // ---------------------- Learning Rate Schedulers ----------------------
+ export class StepLR {
+   constructor(optimizer, step_size, gamma=1.0) {
+     this.optimizer = optimizer;
+     this.step_size = step_size;
+     this.gamma = gamma;
+     this.last_epoch = 0;
+     this.base_lr = optimizer.lr;
+   }
+
+   step() {
+     this.last_epoch += 1;
+     if (this.last_epoch % this.step_size === 0) {
+       this.optimizer.lr *= this.gamma;
+     }
+   }
+
+   get_lr() {
+     return this.optimizer.lr;
+   }
+ }
+
+ export class LambdaLR {
+   constructor(optimizer, lr_lambda) {
+     this.optimizer = optimizer;
+     this.lr_lambda = lr_lambda;
+     this.last_epoch = 0;
+     this.base_lr = optimizer.lr;
+   }
+
+   step() {
+     this.last_epoch += 1;
+     this.optimizer.lr = this.base_lr * this.lr_lambda(this.last_epoch);
+   }
+
+   get_lr() {
+     return this.optimizer.lr;
+   }
+ }
+
  // ---------------------- ELU Activation ----------------------
  export class ELU {
    constructor(alpha=1.0) {

@@ -551,7 +590,7 @@ export function stack(tensors){ return tensors.map(t=>t.data); }
  export function eye(n){ return Array.from({length:n},(_,i)=>Array.from({length:n},(_,j)=>i===j?1:0)); }
  export function concat(a,b,axis=0){ /* concat along axis */ if(axis===0) return [...a,...b]; if(axis===1) return a.map((row,i)=>[...row,...b[i]]); }
  export function reshape(tensor, rows, cols) {
-   let flat = tensor.data.flat(); // flatten
+   let flat = tensor.data.flat(); // flatten
    if(flat.length < rows*cols) throw new Error("reshape size mismatch");
    const out = Array.from({length: rows}, (_, i) =>
      flat.slice(i*cols, i*cols + cols)
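One behavioral difference between the two schedulers added above is worth spelling out: `StepLR` decays the optimizer's current LR in place (so decays compound), while `LambdaLR` recomputes the LR from the `base_lr` captured at construction time. A short sketch of a hand-rolled warmup schedule built on `LambdaLR` (the lambda itself is an illustrative choice, not part of the library; the single mock parameter is a hypothetical placeholder):

```javascript
import { SGD, LambdaLR } from "./src/MainEngine.js";

const params = [{ param: [[0]], grad: [[0]] }]; // hypothetical placeholder parameter
const optimizer = new SGD(params, 0.1);

// Linear warmup over 5 epochs, then hold: lr = base_lr * min(epoch / 5, 1).
const scheduler = new LambdaLR(optimizer, epoch => Math.min(epoch / 5, 1));

for (let epoch = 1; epoch <= 8; epoch++) {
  scheduler.step();
  console.log(`epoch ${epoch}: lr = ${optimizer.lr.toFixed(3)}`);
  // prints 0.020, 0.040, 0.060, 0.080, 0.100, 0.100, 0.100, 0.100
}
```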
package/tests/MakeModel.js
CHANGED

@@ -1,570 +1,38 @@
- //
- import MiniJSTorch, { Tensor } from '../testdummy.js';
+ // Example: Build and run a simple neural network model using mini-jstorch

-
- const torch = new MiniJSTorch();
- let testsPassed = 0;
- let testsFailed = 0;
+ import { Sequential, Linear, ReLU, MSELoss, SGD, Tensor } from "../src/MainEngine.js";

-
-
-
-     testsFailed++;
-   } else {
-     console.log(`✅ PASS: ${message}`);
-     testsPassed++;
-   }
- }
+ // Create dummy input and target data
+ const input = new Tensor([[0.5, -1.0], [1.5, 2.0]]); // shape: [2,2]
+ const target = new Tensor([[1.0, 0.0], [0.0, 1.0]]); // shape: [2,2]

-
-
-
+ // Build a simple model: Linear -> ReLU -> Linear
+ const model = new Sequential([
+   new Linear(2, 4),
+   new ReLU(),
+   new Linear(4, 2)
+ ]);

-
-
-   console.log(`✅ Passed: ${testsPassed}`);
-   console.log(`❌ Failed: ${testsFailed}`);
-   console.log(`📈 Success Rate: ${((testsPassed / (testsPassed + testsFailed)) * 100).toFixed(1)}%`);
- }
+ const criterion = new MSELoss();
+ const optimizer = new SGD(model.parameters(), 0.01);

- //
-
+ // Forward pass
+ const output = model.forward(input.data);
+ console.log("Model output:", output);

-
-
-
-   assert(t1.shape[0] === 2 && t1.shape[1] === 2, "Tensor shape creation");
-   assert(t1.data[0] === 1 && t1.data[3] === 4, "Tensor data initialization");
+ // Compute loss
+ const loss = criterion.forward(output, target.data);
+ console.log("Loss:", loss);

-
-
-
-   assert(sum.data[0] === 6 && sum.data[3] === 12, "Tensor addition");
+ // Backward pass
+ const grad = criterion.backward();
+ model.backward(grad);

-
-
-
+ // Optimizer step
+ optimizer.step();
+ console.log("Parameters updated!");

-
-
-
-
-   // Division
-   const div = t2.div(t1);
-   assert(Math.abs(div.data[0] - 5) < 0.001, "Tensor division");
-
-   // Matrix multiplication
-   const matmul = t1.matmul(t2);
-   assert(Math.abs(matmul.data[0] - 19) < 0.001, "Matrix multiplication");
-
-   // Activation functions
-   const relu = t1.relu();
-   assert(relu.data[0] === 1 && relu.data[1] === 2, "ReLU activation");
-
-   const sigmoid = new Tensor([-1, 0, 1]).sigmoid();
-   assert(sigmoid.data[0] < 0.5 && sigmoid.data[1] === 0.5 && sigmoid.data[2] > 0.5, "Sigmoid activation");
-
-   const tanh = new Tensor([-1, 0, 1]).tanh();
-   assert(tanh.data[0] < 0 && tanh.data[1] === 0 && tanh.data[2] > 0, "Tanh activation");
-
-   // Reshape
-   const reshaped = t1.reshape([4]);
-   assert(reshaped.shape[0] === 4 && reshaped.data[2] === 3, "Tensor reshape");
-
-   // Transpose
-   const transposed = t1.transpose();
-   assert(transposed.shape[0] === 2 && transposed.shape[1] === 2, "Tensor transpose");
-   assert(transposed.data[1] === 3 && transposed.data[2] === 2, "Tensor transpose values");
-
-   // Sum and mean
-   const sumAll = t1.sum();
-   assert(sumAll.data[0] === 10, "Tensor sum all");
-
-   const meanAll = t1.mean();
-   assert(Math.abs(meanAll.data[0] - 2.5) < 0.1, "Tensor mean all");
-
-   // Memory management
-   const memoryStats = Tensor.memoryTracker.getStats();
-   assert(memoryStats.tensorCount > 0, "Memory tracking");
- }
-
- // ====================== LAYERS TESTS ======================
- testSuite("Layers");
-
- function testLayers() {
-   // Linear layer
-   const linear = torch.layers.linear(3, 2);
-   const input = new Tensor([1, 2, 3], [1, 3]);
-   const output = linear.forward(input);
-   assert(output.shape[0] === 1 && output.shape[1] === 2, "Linear layer forward pass");
-
-   // Conv2D layer
-   const conv = torch.layers.conv2d(3, 16, 3);
-   const convInput = new Tensor(new Float32Array(1 * 3 * 32 * 32), [1, 3, 32, 32]);
-   const convOutput = conv.forward(convInput);
-   assert(convOutput.shape[0] === 1 && convOutput.shape[1] === 16, "Conv2D layer forward pass");
-
-   // Activation layers
-   const reluLayer = torch.layers.relu();
-   const reluOutput = reluLayer.forward(new Tensor([-1, 0, 1]));
-   assert(reluOutput.data[0] === 0 && reluOutput.data[2] === 1, "ReLU layer");
-
-   const sigmoidLayer = torch.layers.sigmoid();
-   const sigmoidOutput = sigmoidLayer.forward(new Tensor([-1, 0, 1]));
-   assert(sigmoidOutput.data[0] < 0.5 && sigmoidOutput.data[1] === 0.5, "Sigmoid layer");
-
-   const tanhLayer = torch.layers.tanh();
-   const tanhOutput = tanhLayer.forward(new Tensor([-1, 0, 1]));
-   assert(tanhOutput.data[0] < 0 && tanhOutput.data[1] === 0, "Tanh layer");
-
-   // Dropout layer
-   const dropout = torch.layers.dropout(0.5);
-   dropout.train();
-   const dropoutInput = new Tensor([1, 2, 3, 4]);
-   const dropoutOutput = dropout.forward(dropoutInput);
-   assert(dropoutOutput.data.some(val => val === 0), "Dropout layer in train mode");
-
-   dropout.eval();
-   const dropoutOutputEval = dropout.forward(dropoutInput);
-   assert(dropoutOutputEval.data[0] === 1 && dropoutOutputEval.data[3] === 4, "Dropout layer in eval mode");
-
-   // BatchNorm2D
-   const bn = torch.layers.batchNorm2d(16);
-   bn.train();
-   const bnInput = new Tensor(new Float32Array(2 * 16 * 8 * 8), [2, 16, 8, 8]);
-   const bnOutput = bn.forward(bnInput);
-   assert(bnOutput.shape[0] === 2 && bnOutput.shape[1] === 16, "BatchNorm2D forward pass");
-
-   // LSTM
-   const lstm = torch.layers.lstm(10, 20, 2);
-   const lstmInput = new Tensor(new Float32Array(5 * 3 * 10), [5, 3, 10]);
-   const lstmOutput = lstm.forward(lstmInput);
-   assert(lstmOutput.output.shape[0] === 5 && lstmOutput.output.shape[2] === 20, "LSTM forward pass");
-
-   // MultiHeadAttention
-   const attn = torch.layers.attention(64, 8);
-   const attnInput = new Tensor(new Float32Array(2 * 10 * 64), [2, 10, 64]);
-   const attnOutput = attn.forward(attnInput, attnInput, attnInput);
-   assert(attnOutput.shape[0] === 2 && attnOutput.shape[2] === 64, "MultiHeadAttention forward pass");
-
-   // Transformer
-   const transformer = torch.layers.transformer(64, 8, 2);
-   const transformerOutput = transformer.forward(attnInput);
-   assert(transformerOutput.shape[0] === 2 && transformerOutput.shape[2] === 64, "Transformer forward pass");
-
-   // LayerNorm
-   const layerNorm = torch.layers.layerNorm(64);
-   const normInput = new Tensor(new Float32Array(2 * 10 * 64), [2, 10, 64]);
-   const normOutput = layerNorm.forward(normInput);
-   assert(normOutput.shape[0] === 2 && normOutput.shape[2] === 64, "LayerNorm forward pass");
- }
-
- // ====================== LOSS FUNCTIONS TESTS ======================
- testSuite("Loss Functions");
-
- function testLossFunctions() {
-   // MSE Loss
-   const mse = torch.loss.mse();
-   const pred = new Tensor([1, 2, 3, 4]);
-   const target = new Tensor([0, 2, 3, 5]);
-   const mseLoss = mse.forward(pred, target);
-   assert(Math.abs(mseLoss.data[0] - 0.5) < 0.001, "MSE Loss calculation");
-
-   // CrossEntropy Loss
-   const ce = torch.loss.crossEntropy();
-   const cePred = new Tensor([0.1, 0.9, 0.1, 0.9], [2, 2]);
-   const ceTarget = new Tensor([0, 1, 1, 0], [2, 2]);
-   const ceLoss = ce.forward(cePred, ceTarget);
-   assert(ceLoss.data[0] > 0, "CrossEntropy Loss calculation");
-
-   // Huber Loss
-   const huber = torch.loss.huber(1.0);
-   const huberLoss = huber.forward(pred, target);
-   assert(huberLoss.data[0] > 0, "Huber Loss calculation");
-
-   // Triplet Loss
-   const triplet = torch.loss.tripletLoss(1.0);
-   const anchor = new Tensor([1, 2]);
-   const positive = new Tensor([1.1, 2.1]);
-   const negative = new Tensor([3, 4]);
-   const tripletLoss = triplet.forward(anchor, positive, negative);
-   assert(tripletLoss.data[0] > 0, "Triplet Loss calculation");
- }
-
- // ====================== OPTIMIZERS TESTS ======================
- testSuite("Optimizers");
-
- function testOptimizers() {
-   // Create a simple model
-   const model = torch.nn.sequential([
-     torch.layers.linear(2, 3),
-     torch.layers.relu(),
-     torch.layers.linear(3, 1)
-   ]);
-
-   // SGD
-   const sgd = torch.optim.sgd(0.01);
-   const params = model.getParameters();
-   const initialWeights = params[0].data[0];
-
-   sgd.step(params);
-   assert(params[0].data[0] !== initialWeights, "SGD parameter update");
-
-   // Adam
-   const adam = torch.optim.adam(0.001);
-   const adamInitialWeights = params[0].data[0];
-
-   adam.step(params);
-   assert(params[0].data[0] !== adamInitialWeights, "Adam parameter update");
-
-   // AdamW
-   const adamw = torch.optim.adamw(0.001, 0.9, 0.999, 0.01);
-   const adamwInitialWeights = params[0].data[0];
-
-   adamw.step(params);
-   assert(params[0].data[0] !== adamwInitialWeights, "AdamW parameter update");
-
-   // LAMB
-   const lamb = torch.optim.lamb(0.001);
-   const lambInitialWeights = params[0].data[0];
-
-   lamb.step(params);
-   assert(params[0].data[0] !== lambInitialWeights, "LAMB parameter update");
-
-   // RMSprop
-   const rmsprop = torch.optim.rmsprop(0.01);
-   const rmspropInitialWeights = params[0].data[0];
-
-   rmsprop.step(params);
-   assert(params[0].data[0] !== rmspropInitialWeights, "RMSprop parameter update");
- }
-
- // ====================== NEURAL NETWORK TESTS ======================
- testSuite("Neural Network");
-
- function testNeuralNetwork() {
-   // Sequential model
-   const model = torch.nn.sequential();
-   model.add(torch.layers.linear(4, 8));
-   model.add(torch.layers.relu());
-   model.add(torch.layers.dropout(0.2));
-   model.add(torch.layers.linear(8, 2));
-
-   const input = new Tensor([1, 2, 3, 4], [1, 4]);
-   const output = model.forward(input);
-   assert(output.shape[0] === 1 && output.shape[1] === 2, "Sequential model forward pass");
-
-   // Backward pass
-   const gradOutput = new Tensor([0.1, 0.2], [1, 2]);
-   const gradInput = model.backward(gradOutput);
-   assert(gradInput.shape[0] === 1 && gradInput.shape[1] === 4, "Sequential model backward pass");
-
-   // Get parameters
-   const params = model.getParameters();
-   assert(params.length > 0, "Sequential model parameter extraction");
-
-   // Test individual layer parameters
-   const linearLayer = torch.layers.linear(3, 2);
-   const linearParams = linearLayer.getParameters();
-   assert(linearParams.length === 2, "Linear layer parameter count");
- }
-
- // ====================== UTILS TESTS ======================
- testSuite("Utils");
-
- function testUtils() {
-   // DataLoader
-   const data = {
-     inputs: new Float32Array([1, 2, 3, 4, 5, 6, 7, 8]),
-     targets: new Float32Array([0, 1, 0, 1]),
-     inputShape: [2],
-     targetShape: [1]
-   };
-
-   const loader = torch.utils.dataLoader(data, 2, false);
-   const batches = [...loader];
-   assert(batches.length === 2, "DataLoader batch creation");
-   assert(batches[0].inputs.shape[0] === 2, "DataLoader batch size");
-
-   // One-hot encoding
-   const labels = [0, 2, 1];
-   const oneHot = torch.utils.oneHot(labels, 3);
-   assert(oneHot.shape[0] === 3 && oneHot.shape[1] === 3, "One-hot encoding shape");
-   assert(oneHot.data[0] === 1 && oneHot.data[4] === 1, "One-hot encoding values");
-
-   // Accuracy calculation
-   const pred = new Tensor([0.9, 0.1, 0.7, 0.3], [2, 2]);
-   const target = new Tensor([1, 0, 1, 0], [2, 2]);
-   const accuracy = torch.utils.accuracy(pred, target);
-   assert(accuracy === 1.0, "Accuracy calculation");
-
-   // Benchmark
-   const simpleModel = torch.nn.sequential([
-     torch.layers.linear(10, 5),
-     torch.layers.relu()
-   ]);
-   const benchmarkInput = new Tensor(new Float32Array(10), [1, 10]);
-   const benchmark = torch.utils.benchmark(simpleModel, benchmarkInput, 10);
-   assert(benchmark.avgTime > 0, "Benchmark execution");
-   assert(benchmark.minTime > 0, "Benchmark min time");
-   assert(benchmark.maxTime > 0, "Benchmark max time");
-
-   // Profile function
-   const profileResult = torch.utils.profile(() => {
-     const x = new Tensor([1, 2, 3]);
-     return x.relu();
-   });
-   assert(profileResult !== undefined, "Profile function execution");
- }
-
- // ====================== ADVANCED FEATURES TESTS ======================
- testSuite("Advanced Features");
-
- function testAdvancedFeatures() {
-   // Quantization
-   const quant = torch.quant;
-   const tensor = new Tensor([0.1, 0.5, 0.9]);
-   const quantized = quant.quantize(tensor, 'int8');
-   assert(quantized.dtype === 'int8', "Tensor quantization");
-
-   const dequantized = quant.dequantize(quantized);
-   assert(dequantized.dtype === 'float32', "Tensor dequantization");
-
-   // Automatic Mixed Precision
-   const amp = torch.amp;
-   const ampModel = torch.nn.sequential([
-     torch.layers.linear(4, 2)
-   ]);
-   const ampInput = new Tensor([1, 2, 3, 4], [1, 4]);
-   const ampOutput = amp.forward(ampModel, ampInput);
-   assert(ampOutput.dtype === 'float32', "AMP forward pass");
-
-   // Visualization
-   const viz = torch.viz;
-   viz.init();
-   viz.logLoss(new Tensor([0.5]));
-   viz.logAccuracy(0.8);
-   viz.logLearningRate(0.001);
-   const metrics = viz.getMetrics();
-   assert(metrics.loss.length > 0, "Visualization metrics logging");
-
-   // Learning Rate Schedulers
-   const optimizer = torch.optim.sgd(0.1);
-   const scheduler = new StepLR(optimizer, stepSize=2, gamma=0.1);
-
-   let lr = scheduler.step();
-   assert(lr === 0.1, "StepLR initial learning rate");
-
-   lr = scheduler.step();
-   assert(lr === 0.1, "StepLR before step");
-
-   lr = scheduler.step();
-   assert(lr === 0.01, "StepLR after step");
-
-   // ExponentialLR
-   const expScheduler = new ExponentialLR(optimizer, gamma=0.5);
-   lr = expScheduler.step();
-   assert(lr === 0.005, "ExponentialLR learning rate");
-
-   // CosineAnnealingLR
-   const cosScheduler = new CosineAnnealingLR(optimizer, T_max=10);
-   lr = cosScheduler.step();
-   assert(lr > 0 && lr < 0.1, "CosineAnnealingLR learning rate");
-
-   // Gradient Clipping
-   const params = [new Tensor([1, 2, 3], [3], true)];
-   params[0].grad = new Tensor([0.5, 1.5, 0.8]);
-
-   const norm = clipGradNorm_(params, 1.0);
-   assert(norm > 0, "Gradient norm calculation");
-
-   clipGradValue_(params, 1.0);
-   assert(params[0].grad.data[1] <= 1.0, "Gradient value clipping");
-
-   // Tensor map function (synchronous)
-   const mapTensor = new Tensor([1, 2, 3, 4]);
-   const mapped = mapTensor.map(x => x * 2);
-   assert(mapped.data[0] === 2 && mapped.data[3] === 8, "Tensor map function");
- }
-
- // ====================== INTEGRATION TESTS ======================
- testSuite("Integration");
-
- function testIntegration() {
-   // Complete training simulation
-   const model = torch.nn.sequential([
-     torch.layers.linear(4, 8),
-     torch.layers.relu(),
-     torch.layers.dropout(0.2),
-     torch.layers.linear(8, 2)
-   ]);
-
-   const optimizer = torch.optim.adam(0.01);
-   const lossFn = torch.loss.mse();
-   const scheduler = new StepLR(optimizer, stepSize=10, gamma=0.1);
-
-   // Create dummy data
-   const data = {
-     inputs: new Float32Array(100 * 4),
-     targets: new Float32Array(100 * 2),
-     inputShape: [4],
-     targetShape: [2]
-   };
-
-   // Fill with random data
-   for (let i = 0; i < data.inputs.length; i++) {
-     data.inputs[i] = Math.random();
-     data.targets[i] = Math.random();
-   }
-
-   const loader = torch.utils.dataLoader(data, 10);
-
-   // Training loop
-   let initialLoss = Infinity;
-   let finalLoss = 0;
-
-   for (let epoch = 0; epoch < 3; epoch++) {
-     for (const batch of loader) {
-       // Forward
-       const output = model.forward(batch.inputs);
-       const loss = lossFn.forward(output, batch.targets);
-
-       if (epoch === 0) {
-         initialLoss = loss.data[0];
-       }
-
-       // Backward
-       lossFn.backward();
-       model.backward(loss.grad);
-
-       // Update
-       optimizer.step(model.getParameters());
-       model.getParameters().forEach(p => p.zeroGrad());
-     }
-
-     scheduler.step();
-   }
-
-   // Check final loss
-   const finalBatch = [...loader][0];
-   const finalOutput = model.forward(finalBatch.inputs);
-   const finalLossTensor = lossFn.forward(finalOutput, finalBatch.targets);
-   finalLoss = finalLossTensor.data[0];
-
-   assert(finalLoss < initialLoss, "Training reduces loss");
-   console.log(`Initial loss: ${initialLoss.toFixed(4)}, Final loss: ${finalLoss.toFixed(4)}`);
-
-   // Model saving/loading simulation
-   const modelData = {
-     layers: model.layers.map(layer => ({
-       type: layer.constructor.name,
-       params: layer.getParameters().map(param => ({
-         data: Array.from(param.data),
-         shape: param.shape,
-         requiresGrad: param.requiresGrad
-       }))
-     }))
-   };
-
-   assert(modelData.layers.length > 0, "Model serialization");
-
-   // Test distributed initialization (sync version)
-   const distributed = torch.distributed;
-   distributed.init(1, 0); // Single process
-   assert(distributed.worldSize === 1, "Distributed initialization");
- }
-
- // ====================== PERFORMANCE TESTS ======================
- testSuite("Performance");
-
- function testPerformance() {
-   // Large matrix multiplication performance
-   console.log("Testing large matrix multiplication...");
-   const size = 256;
-   const a = new Tensor(new Float32Array(size * size), [size, size]);
-   const b = new Tensor(new Float32Array(size * size), [size, size]);
-
-   const start = performance.now();
-   const c = a.matmul(b);
-   const end = performance.now();
-
-   const timeMs = end - start;
-   console.log(`Matrix multiplication (${size}x${size}): ${timeMs.toFixed(2)}ms`);
-   assert(timeMs < 1000, "Large matrix multiplication performance");
-
-   // Memory usage
-   const memoryStats = Tensor.memoryTracker.getStats();
-   console.log(`Memory usage: ${(memoryStats.totalMemory / 1024 / 1024).toFixed(2)}MB`);
-   assert(memoryStats.totalMemory < 100 * 1024 * 1024, "Memory usage within limits");
-
-   // Synchronous map test
-   console.log("Testing synchronous map...");
-   const largeTensor = new Tensor(new Float32Array(10000), [10000]);
-
-   const mapStart = performance.now();
-   const result = largeTensor.map(x => x * 2);
-   const mapEnd = performance.now();
-
-   const mapTime = mapEnd - mapStart;
-   console.log(`Synchronous map (10000 elements): ${mapTime.toFixed(2)}ms`);
-   assert(mapTime < 100, "Synchronous map performance");
-
-   // Convolution performance
-   console.log("Testing convolution performance...");
-   const conv = torch.layers.conv2d(3, 16, 3);
-   const convInput = new Tensor(new Float32Array(1 * 3 * 64 * 64), [1, 3, 64, 64]);
-
-   const convStart = performance.now();
-   const convOutput = conv.forward(convInput);
-   const convEnd = performance.now();
-
-   const convTime = convEnd - convStart;
-   console.log(`Convolution (1x3x64x64): ${convTime.toFixed(2)}ms`);
-   assert(convTime < 500, "Convolution performance");
-
-   // LSTM performance
-   console.log("Testing LSTM performance...");
-   const lstm = torch.layers.lstm(64, 128, 2);
-   const lstmInput = new Tensor(new Float32Array(10 * 4 * 64), [10, 4, 64]);
-
-   const lstmStart = performance.now();
-   const lstmOutput = lstm.forward(lstmInput);
-   const lstmEnd = performance.now();
-
-   const lstmTime = lstmEnd - lstmStart;
-   console.log(`LSTM (10x4x64): ${lstmTime.toFixed(2)}ms`);
-   assert(lstmTime < 1000, "LSTM performance");
- }
-
- // ====================== RUN ALL TESTS ======================
- function runAllTests() {
-   console.log("🚀 Starting mini-jstorch test suite...\n");
-
-   try {
-     testTensorOperations();
-     testLayers();
-     testLossFunctions();
-     testOptimizers();
-     testNeuralNetwork();
-     testUtils();
-     testAdvancedFeatures();
-     testIntegration();
-     testPerformance();
-
-     summarize();
-
-     if (testsFailed === 0) {
-       console.log("\n🎉 All tests passed! mini-jstorch is working correctly!");
-     } else {
-       console.log(`\n⚠️ ${testsFailed} test(s) failed. Please check the implementation.`);
-     }
-
-   } catch (error) {
-     console.error("\n💥 Test suite crashed:", error);
-     testsFailed++;
-     summarize();
-   }
- }
-
- // Run the tests
- runAllTests();
+ // Run again to show change
+ const output2 = model.forward(input.data);
+ const loss2 = criterion.forward(output2, target.data);
+ console.log("New Loss:", loss2);
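The rewritten demo above stops after a single optimizer step; looping the same forward/loss/backward/step sequence turns it into a small training run. A sketch under the demo's own imports (this assumes `MSELoss.forward` returns a plain number, as `CrossEntropyLoss` does in the README example, and that repeating backward/step without an explicit gradient reset is safe, which this diff does not state either way):

```javascript
// Continuing from the demo's model/criterion/optimizer:
for (let epoch = 1; epoch <= 50; epoch++) {
  const out = model.forward(input.data);
  const l = criterion.forward(out, target.data);
  model.backward(criterion.backward());
  optimizer.step();
  if (epoch % 10 === 0) console.log(`epoch ${epoch}: loss = ${l.toFixed(4)}`);
}
```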
package/tests/scheduler.js
ADDED

@@ -0,0 +1,23 @@
+ // Example: Test learning rate schedulers (StepLR and LambdaLR) with mini-jstorch optimizers
+
+ import { SGD, StepLR, LambdaLR, Tensor } from "../src/MainEngine.js";
+
+ const param = { param: [[1, 2], [3, 4]], grad: [[0, 0], [0, 0]] };
+ const optimizer = new SGD([param], 0.1);
+
+ // --- Test StepLR ---
+ console.log("Testing StepLR...");
+ const stepScheduler = new StepLR(optimizer, 3, 0.5);
+ for (let epoch = 1; epoch <= 10; epoch++) {
+   stepScheduler.step();
+   console.log(`Epoch ${epoch}: LR = ${optimizer.lr.toFixed(4)}`);
+ }
+
+ // --- Test LambdaLR ---
+ console.log("\nTesting LambdaLR...");
+ optimizer.lr = 0.1; // Reset LR
+ const lambdaScheduler = new LambdaLR(optimizer, epoch => 1.0 / (1 + epoch));
+ for (let epoch = 1; epoch <= 5; epoch++) {
+   lambdaScheduler.step();
+   console.log(`Epoch ${epoch}: LR = ${optimizer.lr.toFixed(4)}`);
+ }
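For reference, the traces these two loops should print can be worked out by hand from the scheduler code in `MainEngine.js`: `StepLR(optimizer, 3, 0.5)` halves the LR at every third step, and `LambdaLR` recomputes `base_lr * lr_lambda(epoch)` each step (with `base_lr = 0.1` captured right after the reset). Expected values, as comments:

```javascript
// StepLR, starting LR 0.1:
//   epochs 1-2: 0.1000   epoch 3: 0.0500   epochs 4-5: 0.0500
//   epoch 6: 0.0250      epochs 7-8: 0.0250
//   epoch 9: 0.0125      epoch 10: 0.0125
//
// LambdaLR, lr = 0.1 * 1 / (1 + epoch):
//   epoch 1: 0.0500   epoch 2: 0.0333   epoch 3: 0.0250
//   epoch 4: 0.0200   epoch 5: 0.0167
```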
package/MODULE.md
DELETED

@@ -1,41 +0,0 @@
- ## MODULE STATS ##
-
- New Files that automatically will All notable changes status state to *Mini-JSTorch* will be documented in this file.
-
- # MSG
-
- btw. This files is actually would be modified and note all changes system state automatically without manual i type with myself.
-
- ---
-
- **OFFICIAL RELEASE:** 2025-Monday-August-23 time: 2:22 AM (estimated time release)
- **VERSION:** 1.3.1
- **LICENSE:** MIT © 2025
- **AUTHOR:** Rizal
- **MODULE NAME:** mini-jstorch
- **MODULE DESC:** A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices Inspired by PyTorch.
- **MODULE TYPE:** module
- **ENGINE VERSIONS:** 1.2.1
- **UPDATE TITLE:** `PATCH` update.
- **ADDED FILES/FOLDER:** {
-   "N/A" //N/A [N/A]
- }
-
- ---
-
- **MODIFIED FILES:** {
-   "src" //folder
-   "src/startup.cpu" //files
-   "src/MainEngine.js" //files
-   "tests/tests.js" //files [npmignore detected]
-   "tests" //folder
-   "src/Dummy/exp.js" //files [npmignore detected]
-   "package.json" //files
-   "src/EngState.json" //files [npmignore detected]
-   "src/state.txt" //files [npmignore detected]
-   "README.md" //files
-   ".npmignore" //files [npmignore detected]
-   "N/A" //N/A [N/A]
- }
-
- ---
package/hh.js
DELETED

@@ -1,38 +0,0 @@
- import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam } from './src/MainEngine.js';
-
- // Build model
- const model = new Sequential([
-   new Linear(2,4),
-   new ReLU(),
-   new Linear(4,2),
-   new Sigmoid()
- ]);
-
- // Sample XOR dataset
- const X = [
-   [0,0], [0,1], [1,0], [1,1]
- ];
- const Y = [
-   [1,0], [0,1], [0,1], [1,0]
- ];
-
- // Loss & optimizer
- const lossFn = new CrossEntropyLoss();
- const optimizer = new Adam(model.parameters(), 0.1);
-
- // Training loop
- for (let epoch = 1; epoch <= 500; epoch++) {
-   const pred = model.forward(X);
-   const loss = lossFn.forward(pred, Y);
-   const gradLoss = lossFn.backward();
-   model.backward(gradLoss);
-   optimizer.step();
-   if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}`);
- }
-
- // Prediction
- const predTest = model.forward(X);
- predTest.forEach((p,i) => {
-   const predictedClass = p.indexOf(Math.max(...p));
-   console.log(`Input: ${X[i]}, Predicted class: ${predictedClass}, Raw output: ${p.map(v => v.toFixed(3))}`);
- });
package/tests/DebugModel.js
DELETED

@@ -1,127 +0,0 @@
- // src/Dummy/debug_train.js
- import {
-   Tensor,
-   Linear,
-   Sequential,
-   ReLU,
-   Sigmoid,
-   CrossEntropyLoss,
-   Adam,
-   MathOps
- } from '../src/Dummy/exp.js';
-
- // ---------------------- Simple Debug Data ----------------------
- function generateSimpleData() {
-   // VERY simple data: 4 points in 2D
-   const X = [
-     [0, 0],
-     [0, 1],
-     [1, 0],
-     [1, 1]
-   ];
-
-   const y = [
-     [0], // AND operation
-     [0],
-     [0],
-     [1]
-   ];
-
-   return { X, y };
- }
-
- // ---------------------- Debug Model ----------------------
- function createDebugModel() {
-   return new Sequential([
-     new Linear(2, 2), // Small layer
-     new ReLU(),
-     new Linear(2, 1), // Output layer
-     new Sigmoid()
-   ]);
- }
-
- // ---------------------- Debug Training ----------------------
- function debugTraining() {
-   console.log("🔍 DEBUG TRAINING STARTED");
-   console.log("=========================");
-
-   const data = generateSimpleData();
-   const model = createDebugModel();
-   const lossFunction = new CrossEntropyLoss();
-   const parameters = model.parameters();
-
-   console.log("Model parameters:", parameters.length);
-   console.log("Data samples:", data.X.length);
-
-   // Single step debug
-   for (let step = 0; step < 10; step++) {
-     console.log(`\n--- Step ${step} ---`);
-
-     // Forward pass
-     const predictions = model.forward(data.X);
-     console.log("Predictions:", predictions.map(p => p[0].toFixed(3)));
-
-     const loss = lossFunction.forward(predictions, data.y);
-     console.log("Loss:", loss);
-
-     if (isNaN(loss)) {
-       console.log("❌ NaN LOSS DETECTED!");
-       console.log("Predictions:", predictions);
-       console.log("Targets:", data.y);
-       break;
-     }
-
-     // Backward pass
-     const grad = lossFunction.backward();
-     console.log("Gradient:", grad.map(g => g[0].toFixed(3)));
-
-     model.backward(grad);
-
-     // Check gradients
-     console.log("Parameter gradients:");
-     parameters.forEach((param, idx) => {
-       if (Array.isArray(param.grad[0])) {
-         console.log(`  Param ${idx} grad:`, param.grad.map(row =>
-           row.map(v => v.toFixed(3))
-         ));
-       } else {
-         console.log(`  Param ${idx} grad:`, param.grad.map(v => v.toFixed(3)));
-       }
-     });
-
-     // Update weights
-     const optimizer = new Adam(parameters, 0.1);
-     optimizer.step();
-
-     // Reset gradients manually
-     parameters.forEach(param => {
-       if (Array.isArray(param.grad[0])) {
-         for (let i = 0; i < param.grad.length; i++) {
-           for (let j = 0; j < param.grad[0].length; j++) {
-             param.grad[i][j] = 0;
-           }
-         }
-       } else {
-         for (let i = 0; i < param.grad.length; i++) {
-           param.grad[i] = 0;
-         }
-       }
-     });
-
-     // Calculate accuracy
-     const accuracy = calculateAccuracy(predictions, data.y);
-     console.log("Accuracy:", (accuracy * 100).toFixed(1) + "%");
-   }
- }
-
- function calculateAccuracy(predictions, targets) {
-   let correct = 0;
-   for (let i = 0; i < predictions.length; i++) {
-     const predLabel = predictions[i][0] > 0.5 ? 1 : 0;
-     if (predLabel === targets[i][0]) correct++;
-   }
-   return correct / predictions.length;
- }
-
- // Run debug
- debugTraining();
package/tests/unit/newver.js
DELETED

@@ -1,103 +0,0 @@
- // tests/engine.test.js
- import {
-   MatrixOps,
-   Tensor,
-   Linear,
-   CrossEntropyLoss, // Switched to using the class
-   Adam,
-   Sequential,
-   ReLU,
-   MathOps // Import MathOps, which contains softmax
- } from '../../src/Dummy/exp.js';
-
- // Alias for softmax from MathOps
- const softmax = MathOps.softmax;
-
- // Test 1: MatrixOps.matmul vs old Tensor.matmul (dot)
- console.log("=== TEST 1: Matrix Multiplication ===");
- const A = [[1, 2], [3, 4]];
- const B = [[5, 6], [7, 8]];
-
- // Correct matrix multiplication
- const correctResult = MatrixOps.matmul(A, B);
- console.log("Correct matmul result:", correctResult);
-
- // Test new Tensor matmul
- const tensorA = new Tensor(A);
- const tensorB = new Tensor(B);
- const tensorResult = tensorA.matmul(tensorB);
- console.log("New Tensor matmul result:", tensorResult.data);
-
- // Test 2: Tensor operations consistency
- console.log("\n=== TEST 2: Tensor Operations ===");
- const testTensor = new Tensor([[1, 2], [3, 4]]);
- console.log("Original shape:", testTensor.shape());
- console.log("Transpose shape:", testTensor.transpose().shape());
- console.log("Flatten shape:", testTensor.flatten().shape());
-
- // Test 3: CrossEntropyLoss backward pass
- console.log("\n=== TEST 3: CrossEntropyLoss Backward ===");
- const pred = [[0.8, 0.1, 0.1], [0.2, 0.7, 0.1]];
- const target = [[1, 0, 0], [0, 1, 0]];
-
- const lossFn = new CrossEntropyLoss();
- const loss = lossFn.forward(pred, target);
- console.log("Loss value:", loss);
-
- const grad = lossFn.backward();
- console.log("Gradient shape per sample:", grad.map(g => g.length));
- console.log("Sample gradient:", grad[0]);
-
- // Test 4: Adam optimizer with 1D and 2D parameters
- console.log("\n=== TEST 4: Adam Optimizer ===");
-
- // Mock parameters (like from a Linear layer)
- const mockParams = [
-   {
-     param: [[0.5, 0.3], [0.1, 0.9]], // 2D weights
-     grad: [[0.1, 0.2], [0.3, 0.4]]
-   },
-   {
-     param: [0.1, 0.2], // 1D bias
-     grad: [0.01, 0.02]
-   }
- ];
-
- console.log("Before optimization - Weights:", mockParams[0].param);
- console.log("Before optimization - Bias:", mockParams[1].param);
-
- const optimizer = new Adam(mockParams, 0.01);
- optimizer.step();
-
- console.log("After optimization - Weights:", mockParams[0].param);
- console.log("After optimization - Bias:", mockParams[1].param);
-
- // Test 5: Integration test - Simple forward pass
- console.log("\n=== TEST 5: Integration Test ===");
- try {
-   const linearLayer = new Linear(2, 3);
-   const input = new Tensor([[1, 2]]);
-
-   console.log("Input shape:", input.shape());
-
-   const output = linearLayer.forward(input.data);
-   console.log("Linear layer output shape:", [output.length, output[0].length]);
-
-   // Test with ReLU
-   const relu = new ReLU();
-   const activated = relu.forward(output);
-   console.log("ReLU output shape:", [activated.length, activated[0].length]);
-
-   console.log("✅ All integration tests passed!");
- } catch (error) {
-   console.log("❌ Integration test failed:", error.message);
- }
-
- // Test 6: Softmax consistency
- console.log("\n=== TEST 6: Softmax ===");
- const logits = [2.0, 1.0, 0.1];
- const sm = softmax(logits);
- console.log("Softmax result:", sm);
- console.log("Sum:", sm.reduce((a, b) => a + b, 0));
-
- console.log("\n=== ALL TESTS COMPLETED ===");
package/tests/unit/ogver.js
DELETED

@@ -1,87 +0,0 @@
- // TEST JSTORCH WHOLE SYSTEMS AT ONCE
- // THIS FILES TESTINGS AT PREVIOUS UPDATE 1.3.0
- // DEPRECATED FILES NOT NEW
- import { Tensor, Linear, Sequential, ReLU, Sigmoid, Tanh, LeakyReLU, GELU, Dropout, Conv2D, MSELoss, CrossEntropyLoss, Adam, SGD, saveModel, loadModel, flattenBatch, reshape, stack, concat, eye } from '../../src/MainEngine.js';
-
- // ---------------------- Linear Test ----------------------
- console.log("=== Linear Test ===");
- const lin = new Linear(3,2);
- const linInput = [[1,2,3],[4,5,6]];
- const linOut = lin.forward(linInput);
- console.log("Linear forward:", linOut);
- const linGrad = [[0.1,0.2],[0.3,0.4]];
- const linBack = lin.backward(linGrad);
- console.log("Linear backward gradInput:", linBack);
-
- // ---------------------- Sequential + Activations Test ----------------------
- console.log("\n=== Sequential + Activations Test ===");
- const model = new Sequential([new Linear(2,2), new ReLU(), new Linear(2,1), new Sigmoid()]);
- const seqInput = [[0.5,1.0],[1.5,2.0]];
- const seqOut = model.forward(seqInput);
- console.log("Sequential forward:", seqOut);
- const seqGrad = [[0.1],[0.2]];
- const seqBack = model.backward(seqGrad);
- console.log("Sequential backward gradInput:", seqBack);
-
- // ---------------------- Conv2D Test ----------------------
- console.log("\n=== Conv2D Test ===");
- const conv = new Conv2D(1,1,3);
- const convInput = [[[ [1,2,3],[4,5,6],[7,8,9] ]]]; // batch=1, inC=1, HxW=3x3
- const convOut = conv.forward(convInput);
- console.log("Conv2D forward:", convOut);
-
- // Conv2D backward test
- const convGrad = [[[ [0.1,0.2,0.1],[0.2,0.3,0.2],[0.1,0.2,0.1] ]]];
- const convBack = conv.backward(convGrad);
- console.log("Conv2D backward gradInput:", convBack);
-
- // ---------------------- Tensor & Broadcast Test ----------------------
- console.log("\n=== Tensor & Broadcast Test ===");
- const a = Tensor.random(2,3);
- const b = Tensor.ones(2,3);
- const sum = a.add(b);
- console.log("Tensor add broadcast:", sum);
-
- // ---------------------- Loss + Optimizer Test ----------------------
- console.log("\n=== Loss + Optimizer Test ===");
- const lossModel = new Sequential([new Linear(2,2)]);
- const pred = lossModel.forward([[1,2]]);
- const target = [[0,1]];
- const ceLoss = new CrossEntropyLoss();
- const lval = ceLoss.forward(pred,target);
- console.log("CrossEntropyLoss value:", lval);
-
- const gradLoss = ceLoss.backward();
- lossModel.backward(gradLoss);
-
- const opt = new Adam(lossModel.parameters());
- opt.step();
- console.log("Updated parameters after Adam:", lossModel.parameters());
-
- // ---------------------- Dropout Test ----------------------
- console.log("\n=== Dropout Test ===");
- const drop = new Dropout(0.5);
- const dropInput = [[1,2],[3,4]];
- const dropOut = drop.forward(dropInput);
- console.log("Dropout forward:", dropOut);
- const dropBack = drop.backward([[0.1,0.2],[0.3,0.4]]);
- console.log("Dropout backward:", dropBack);
-
- // ---------------------- Save / Load Model Test ----------------------
- console.log("\n=== Save / Load Model Test ===");
- const modelSave = new Sequential([new Linear(2,2)]);
- const json = saveModel(modelSave);
- console.log("Saved model JSON:", json);
- const modelLoad = new Sequential([new Linear(2,2)]);
- loadModel(modelLoad,json);
- console.log("Loaded model parameters:", modelLoad.parameters());
-
- // ---------------------- Advanced Utils Test ----------------------
- console.log("\n=== Advanced Utils Test ===");
- const batch = [[[1,2],[3,4]],[[5,6],[7,8]]];
- console.log("Flatten batch:", flattenBatch(batch));
- console.log("Eye 3:", eye(3));
- console.log("Reshape:", reshape({data:[[1,2,3,4]]},2,2));
- console.log("Stack:", stack([Tensor.ones(2,2), Tensor.zeros(2,2)]));
- console.log("Concat axis0:", concat([[1,2],[3,4]], [[5,6],[7,8]], 0));
- console.log("Concat axis1:", concat([[1,2],[3,4]], [[5,6],[7,8]], 1));