mini-jstorch 1.2.2 → 1.3.2
This diff shows the changes between publicly released versions of this package, as they appear in the supported public registries, and is provided for informational purposes only.
- package/MODULE.md +41 -0
- package/README.md +117 -8
- package/hh.js +38 -0
- package/index.js +90 -6
- package/package.json +4 -1
- package/src/MainEngine.js +560 -1
- package/src/startup.cpu +11 -8
- package/tests/DebugModel.js +127 -0
- package/tests/MakeModel.js +570 -0
- package/tests/unit/newver.js +103 -0
- package/tests/unit/ogver.js +87 -0

package/tests/unit/newver.js
@@ -0,0 +1,103 @@
+// tests/engine.test.js
+import {
+  MatrixOps,
+  Tensor,
+  Linear,
+  CrossEntropyLoss, // switched to the class-based API
+  Adam,
+  Sequential,
+  ReLU,
+  MathOps // MathOps provides softmax
+} from '../../src/Dummy/exp.js';
+
+// Alias for softmax from MathOps
+const softmax = MathOps.softmax;
+
+// Test 1: MatrixOps.matmul vs old Tensor.matmul (dot)
+console.log("=== TEST 1: Matrix Multiplication ===");
+const A = [[1, 2], [3, 4]];
+const B = [[5, 6], [7, 8]];
+
+// Correct matrix multiplication
+const correctResult = MatrixOps.matmul(A, B);
+console.log("Correct matmul result:", correctResult);
+
+// Test new Tensor matmul
+const tensorA = new Tensor(A);
+const tensorB = new Tensor(B);
+const tensorResult = tensorA.matmul(tensorB);
+console.log("New Tensor matmul result:", tensorResult.data);
+
+// Test 2: Tensor operations consistency
+console.log("\n=== TEST 2: Tensor Operations ===");
+const testTensor = new Tensor([[1, 2], [3, 4]]);
+console.log("Original shape:", testTensor.shape());
+console.log("Transpose shape:", testTensor.transpose().shape());
+console.log("Flatten shape:", testTensor.flatten().shape());
+
+// Test 3: CrossEntropyLoss backward pass
+console.log("\n=== TEST 3: CrossEntropyLoss Backward ===");
+const pred = [[0.8, 0.1, 0.1], [0.2, 0.7, 0.1]];
+const target = [[1, 0, 0], [0, 1, 0]];
+
+const lossFn = new CrossEntropyLoss();
+const loss = lossFn.forward(pred, target);
+console.log("Loss value:", loss);
+
+const grad = lossFn.backward();
+console.log("Gradient shape per sample:", grad.map(g => g.length));
+console.log("Sample gradient:", grad[0]);
+
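
Test 3 above prints the gradient without asserting exact values. This hunk does not show the loss implementation, but for one-hot targets the conventional softmax cross-entropy gradient is softmax(pred) - target. A minimal self-contained sketch of that convention (the helper names are illustrative, not mini-jstorch API):

// Sketch only -- conventional softmax cross-entropy, not the package's code.
// L = -sum(target * log(p)) with p = softmax(logits); dL/dlogits = p - target.
function softmaxRow(row) {
  const m = Math.max(...row); // subtract the max for numerical stability
  const exps = row.map(v => Math.exp(v - m));
  const s = exps.reduce((acc, e) => acc + e, 0);
  return exps.map(e => e / s);
}
function crossEntropyGrad(logits, target) {
  return logits.map((row, i) => softmaxRow(row).map((p, j) => p - target[i][j]));
}
console.log(crossEntropyGrad([[0.8, 0.1, 0.1]], [[1, 0, 0]])[0]);
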
+// Test 4: Adam optimizer with 1D and 2D parameters
+console.log("\n=== TEST 4: Adam Optimizer ===");
+
+// Mock parameters (like from a Linear layer)
+const mockParams = [
+  {
+    param: [[0.5, 0.3], [0.1, 0.9]], // 2D weights
+    grad: [[0.1, 0.2], [0.3, 0.4]]
+  },
+  {
+    param: [0.1, 0.2], // 1D bias
+    grad: [0.01, 0.02]
+  }
+];
+
+console.log("Before optimization - Weights:", mockParams[0].param);
+console.log("Before optimization - Bias:", mockParams[1].param);
+
+const optimizer = new Adam(mockParams, 0.01);
+optimizer.step();
+
+console.log("After optimization - Weights:", mockParams[0].param);
+console.log("After optimization - Bias:", mockParams[1].param);
+
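
Test 4's before/after printout is easier to interpret with the update rule in view. The Adam implementation itself is not in this hunk; below is the textbook per-scalar update with the usual defaults, including bias correction (whether mini-jstorch bias-corrects is an assumption):

// Sketch only -- standard Adam update for a single scalar parameter.
function adamStep(p, g, state, lr = 0.01, b1 = 0.9, b2 = 0.999, eps = 1e-8) {
  state.t += 1;
  state.m = b1 * state.m + (1 - b1) * g;     // first-moment (mean) estimate
  state.v = b2 * state.v + (1 - b2) * g * g; // second-moment (variance) estimate
  const mHat = state.m / (1 - Math.pow(b1, state.t)); // bias correction
  const vHat = state.v / (1 - Math.pow(b2, state.t));
  return p - lr * mHat / (Math.sqrt(vHat) + eps);
}
const state = { m: 0, v: 0, t: 0 };
console.log(adamStep(0.5, 0.1, state)); // first step: 0.5 -> ~0.49 at lr = 0.01
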
+// Test 5: Integration test - Simple forward pass
+console.log("\n=== TEST 5: Integration Test ===");
+try {
+  const linearLayer = new Linear(2, 3);
+  const input = new Tensor([[1, 2]]);
+
+  console.log("Input shape:", input.shape());
+
+  const output = linearLayer.forward(input.data);
+  console.log("Linear layer output shape:", [output.length, output[0].length]);
+
+  // Test with ReLU
+  const relu = new ReLU();
+  const activated = relu.forward(output);
+  console.log("ReLU output shape:", [activated.length, activated[0].length]);
+
+  console.log("✅ All integration tests passed!");
+} catch (error) {
+  console.log("❌ Integration test failed:", error.message);
+}
+
+// Test 6: Softmax consistency
+console.log("\n=== TEST 6: Softmax ===");
+const logits = [2.0, 1.0, 0.1];
+const sm = softmax(logits);
+console.log("Softmax result:", sm);
+console.log("Sum:", sm.reduce((a, b) => a + b, 0));
+
+console.log("\n=== ALL TESTS COMPLETED ===");
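
Two checks in this file have hand-computable expectations: Test 1's matrix product and Test 6's softmax, whose entries must sum to 1. Reference computations (not package code):

// Expected values for Tests 1 and 6.
const matmulRef = (X, Y) =>
  X.map(row => Y[0].map((_, j) => row.reduce((s, x, k) => s + x * Y[k][j], 0)));
console.log(matmulRef([[1, 2], [3, 4]], [[5, 6], [7, 8]])); // [[19, 22], [43, 50]]

const exps = [2.0, 1.0, 0.1].map(Math.exp);
const Z = exps.reduce((acc, e) => acc + e, 0);
console.log(exps.map(e => e / Z)); // ≈ [0.6590, 0.2424, 0.0986]; sums to 1
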
package/tests/unit/ogver.js
@@ -0,0 +1,87 @@
+// TESTS THE WHOLE JSTORCH SYSTEM AT ONCE
+// THIS FILE TESTS THE PREVIOUS UPDATE, 1.3.0
+// DEPRECATED: COVERS THE OLD API, NOT THE NEW ONE
+import { Tensor, Linear, Sequential, ReLU, Sigmoid, Tanh, LeakyReLU, GELU, Dropout, Conv2D, MSELoss, CrossEntropyLoss, Adam, SGD, saveModel, loadModel, flattenBatch, reshape, stack, concat, eye } from '../../src/MainEngine.js';
+
+// ---------------------- Linear Test ----------------------
+console.log("=== Linear Test ===");
+const lin = new Linear(3,2);
+const linInput = [[1,2,3],[4,5,6]];
+const linOut = lin.forward(linInput);
+console.log("Linear forward:", linOut);
+const linGrad = [[0.1,0.2],[0.3,0.4]];
+const linBack = lin.backward(linGrad);
+console.log("Linear backward gradInput:", linBack);
+
+// ---------------------- Sequential + Activations Test ----------------------
+console.log("\n=== Sequential + Activations Test ===");
+const model = new Sequential([new Linear(2,2), new ReLU(), new Linear(2,1), new Sigmoid()]);
+const seqInput = [[0.5,1.0],[1.5,2.0]];
+const seqOut = model.forward(seqInput);
+console.log("Sequential forward:", seqOut);
+const seqGrad = [[0.1],[0.2]];
+const seqBack = model.backward(seqGrad);
+console.log("Sequential backward gradInput:", seqBack);
+
+// ---------------------- Conv2D Test ----------------------
+console.log("\n=== Conv2D Test ===");
+const conv = new Conv2D(1,1,3);
+const convInput = [[[ [1,2,3],[4,5,6],[7,8,9] ]]]; // batch=1, inC=1, HxW=3x3
+const convOut = conv.forward(convInput);
+console.log("Conv2D forward:", convOut);
+
+// Conv2D backward test
+const convGrad = [[[ [0.1,0.2,0.1],[0.2,0.3,0.2],[0.1,0.2,0.1] ]]];
+const convBack = conv.backward(convGrad);
+console.log("Conv2D backward gradInput:", convBack);
+
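
A detail worth flagging in the Conv2D test: with no padding, a 3x3 kernel over a 3x3 input yields a 1x1 output, yet backward() is fed a 3x3 gradient, which only lines up if the layer pads to preserve spatial size. The standard size arithmetic (stride and padding behavior of mini-jstorch's Conv2D is an assumption; it is not shown in this diff):

// outSize = floor((inSize + 2*pad - kernel) / stride) + 1
function convOutSize(inSize, kernel, stride = 1, pad = 0) {
  return Math.floor((inSize + 2 * pad - kernel) / stride) + 1;
}
console.log(convOutSize(3, 3));       // 1 -> "valid" convolution
console.log(convOutSize(3, 3, 1, 1)); // 3 -> "same"-style padding, matching the 3x3 grad above
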
+// ---------------------- Tensor & Broadcast Test ----------------------
+console.log("\n=== Tensor & Broadcast Test ===");
+const a = Tensor.random(2,3);
+const b = Tensor.ones(2,3);
+const sum = a.add(b);
+console.log("Tensor add broadcast:", sum);
+
+// ---------------------- Loss + Optimizer Test ----------------------
+console.log("\n=== Loss + Optimizer Test ===");
+const lossModel = new Sequential([new Linear(2,2)]);
+const pred = lossModel.forward([[1,2]]);
+const target = [[0,1]];
+const ceLoss = new CrossEntropyLoss();
+const lval = ceLoss.forward(pred,target);
+console.log("CrossEntropyLoss value:", lval);
+
+const gradLoss = ceLoss.backward();
+lossModel.backward(gradLoss);
+
+const opt = new Adam(lossModel.parameters());
+opt.step();
+console.log("Updated parameters after Adam:", lossModel.parameters());
+
+// ---------------------- Dropout Test ----------------------
+console.log("\n=== Dropout Test ===");
+const drop = new Dropout(0.5);
+const dropInput = [[1,2],[3,4]];
+const dropOut = drop.forward(dropInput);
+console.log("Dropout forward:", dropOut);
+const dropBack = drop.backward([[0.1,0.2],[0.3,0.4]]);
+console.log("Dropout backward:", dropBack);
+
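
The Dropout test prints masked values without asserting scaling. The common "inverted dropout" formulation rescales kept units by 1/(1-p) so that expected activations match evaluation mode; whether mini-jstorch rescales is not visible here. A sketch of that formulation:

// Sketch only -- inverted dropout with rescaling of kept units.
function dropoutForward(rows, p) {
  const mask = rows.map(r => r.map(() => (Math.random() >= p ? 1 : 0)));
  const out = rows.map((r, i) => r.map((v, j) => (v * mask[i][j]) / (1 - p)));
  return { out, mask }; // backward reuses the mask to zero the same units
}
console.log(dropoutForward([[1, 2], [3, 4]], 0.5).out);
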
+// ---------------------- Save / Load Model Test ----------------------
+console.log("\n=== Save / Load Model Test ===");
+const modelSave = new Sequential([new Linear(2,2)]);
+const json = saveModel(modelSave);
+console.log("Saved model JSON:", json);
+const modelLoad = new Sequential([new Linear(2,2)]);
+loadModel(modelLoad,json);
+console.log("Loaded model parameters:", modelLoad.parameters());
+
+// ---------------------- Advanced Utils Test ----------------------
+console.log("\n=== Advanced Utils Test ===");
+const batch = [[[1,2],[3,4]],[[5,6],[7,8]]];
+console.log("Flatten batch:", flattenBatch(batch));
+console.log("Eye 3:", eye(3));
+console.log("Reshape:", reshape({data:[[1,2,3,4]]},2,2));
+console.log("Stack:", stack([Tensor.ones(2,2), Tensor.zeros(2,2)]));
+console.log("Concat axis0:", concat([[1,2],[3,4]], [[5,6],[7,8]], 0));
+console.log("Concat axis1:", concat([[1,2],[3,4]], [[5,6],[7,8]], 1));
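
The Save / Load test only prints the loaded parameters; a round trip is normally asserted by comparing parameters before and after. A minimal sketch, assuming saveModel returns a JSON string and parameters() returns plain nested number arrays (both assumptions; neither signature is shown in this diff):

// Round-trip equality check over plain nested arrays.
const paramsEqual = (x, y) => JSON.stringify(x) === JSON.stringify(y);
const saved = [[0.5, -0.3], [0.1, 0.9]];            // stand-in for modelSave.parameters()
const restored = JSON.parse(JSON.stringify(saved)); // stand-in after loadModel()
console.log("Round trip preserved parameters:", paramsEqual(saved, restored)); // true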