mini-jstorch 1.3.0 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
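A comparable diff can usually be reproduced locally with npm's built-in diff command (npm 7+):

```sh
npm diff --diff=mini-jstorch@1.3.0 --diff=mini-jstorch@1.3.2
```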
package/MODULE.md CHANGED
@@ -9,20 +9,15 @@ btw. This file is modified on each release to note all changes to the module state.
  ---
 
  **OFFICIAL RELEASE:** 2025-Monday-August-23 time: 2:22 AM (estimated release time)
- **VERSION:** 1.3.0
+ **VERSION:** 1.3.1
  **LICENSE:** MIT © 2025
  **AUTHOR:** Rizal
  **MODULE NAME:** mini-jstorch
  **MODULE DESC:** A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.
  **MODULE TYPE:** module
- **ENGINE VERSIONS:** 1.2.0
- **UPDATE TITLE:** `MAJOR` update.
+ **ENGINE VERSIONS:** 1.2.1
+ **UPDATE TITLE:** `PATCH` update.
  **ADDED FILES/FOLDER:** {
- "tests" //folder
- "tests/proj1.js" //files
- "tests/tests.js" //files [npmignore detected]
- "tests/proj2.js" //files
- "tests/proj3.js" //files
  "N/A" //N/A [N/A]
  }
 
package/README.md CHANGED
@@ -1,27 +1,16 @@
- # Mini-JSTorch Major Update
+ # Mini-JSTorch
 
- # MAJOR UPDATE IS RELEASED NOW!! [got little late]
-
- ---
-
- ## 🙏 Big Thanks 🙏
- I genuinely appreciate everyone who has taken the time to install and try out this module. Every download isn’t just a number it’s a real person spending their time to explore, learn, or build something with this module!
-
- Reaching 400+ weekly downloads might sound small compared to giant frameworks, but for me it’s huge. It means this project is actually helping people out there, and that alone makes every late-night coding session worth it.
-
- Thank you for giving this module a chance. Your support and trust are what keep this project moving forward! 🚀
-
- ---
+ A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.
 
  ## Overview
 
  Mini-JSTorch is a lightweight, high-performance JavaScript library for building neural networks that runs efficiently in both frontend and backend environments, including low-end devices. The library enables experimentation and learning in AI without compromising stability, accuracy, or training reliability.
 
- This release, **version 1.2.3**, is a **major update** that introduces full feature coverage, including convolutional layers, advanced activations, dropout, tensor broadcasting, and enhanced utilities. The engine remains pure JavaScript, lightweight, and compatible with both Node.js and browser environments.
+ This release, **version 1.2.3**, just fixes a typo and removes unused files.
 
  ---
 
- ## Major Updates in 1.2.3
+ ## Features Overview
 
  - Full **Conv2D support** with forward and backward operations.
  - **Tensor operations** now support broadcasting and reshaping.
@@ -133,4 +122,5 @@ loadModel(model2, json);
  - **This module is implemented entirely in pure JavaScript.**
  - **The `Dummy` folder contains modules used for development, testing, and debugging before integration into the main engine.**
  - **The file `startup.cpu` is actually just a random file, lol.**
- - **This module was created by a `single` developer.**
+ - **This module was created by a `single` developer.**
+ - **You can join the `mjs-group` organization on my profile!**
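The README's notes sit next to its save/load example (the hunk context above shows `loadModel(model2, json);`). A minimal sketch of that round trip, assuming `saveModel` and `loadModel` are both exported from `MainEngine.js` and that `saveModel` returns a JSON-serializable object:

```js
import { Sequential, Linear, ReLU, saveModel, loadModel } from './src/MainEngine.js';

// Serialize a trained model, then restore it into an identically shaped one.
const model = new Sequential([new Linear(2, 4), new ReLU(), new Linear(4, 1)]);
const json = saveModel(model);   // Sequential models only, per the engine's check

const model2 = new Sequential([new Linear(2, 4), new ReLU(), new Linear(4, 1)]);
loadModel(model2, json);         // weights copied into model2
```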
package/hh.js ADDED
@@ -0,0 +1,38 @@
+ import { Sequential, Linear, ReLU, Sigmoid, CrossEntropyLoss, Adam } from './src/MainEngine.js';
+
+ // Build model
+ const model = new Sequential([
+   new Linear(2, 4),
+   new ReLU(),
+   new Linear(4, 2),
+   new Sigmoid()
+ ]);
+
+ // Sample XOR dataset
+ const X = [
+   [0, 0], [0, 1], [1, 0], [1, 1]
+ ];
+ const Y = [
+   [1, 0], [0, 1], [0, 1], [1, 0]
+ ];
+
+ // Loss & optimizer
+ const lossFn = new CrossEntropyLoss();
+ const optimizer = new Adam(model.parameters(), 0.1);
+
+ // Training loop
+ for (let epoch = 1; epoch <= 500; epoch++) {
+   const pred = model.forward(X);
+   const loss = lossFn.forward(pred, Y);
+   const gradLoss = lossFn.backward();
+   model.backward(gradLoss);
+   optimizer.step();
+   if (epoch % 20 === 0) console.log(`Epoch ${epoch}, Loss: ${loss.toFixed(4)}`);
+ }
+
+ // Prediction
+ const predTest = model.forward(X);
+ predTest.forEach((p, i) => {
+   const predictedClass = p.indexOf(Math.max(...p));
+   console.log(`Input: ${X[i]}, Predicted class: ${predictedClass}, Raw output: ${p.map(v => v.toFixed(3))}`);
+ });
package/index.js CHANGED
@@ -1,6 +1,90 @@
- // Entry point of the library, export main classes and functions [DEPRECATED]
- export { Seq } from './models/seq.js';
- export { Dense } from './layers/dense.js';
- export * as act from './act/linear.js';
- export { SGD } from './optim/sgd.js';
- export { train } from './train/loop.js';
+ // =====================================================
+ // FILE: index.js - Main entry point
+ // =====================================================
+
+ // Import all modules
+ import { Tensor } from './src/tensor.js';
+ import {
+   Linear, Dense, Conv2d, MaxPool2d, AvgPool2d, Flatten, Dropout,
+   BatchNorm2d, ReLU, Sigmoid, Tanh, LeakyReLU, ELU, Softmax, Sequential
+ } from './src/layers.js';
+ import { MSELoss, CrossEntropyLoss, BCELoss } from './src/loss.js';
+ import { SGD, Adam, RMSprop } from './src/optimizers.js';
+ import { Model, Trainer, models, optimizers, losses, layers, tensors, utils } from './src/model.js';
+
+ // Export everything
+ export {
+   // Core classes
+   Tensor,
+   Model,
+   Trainer,
+
+   // Layer classes
+   Linear,
+   Dense,
+   Conv2d,
+   MaxPool2d,
+   AvgPool2d,
+   Flatten,
+   Dropout,
+   BatchNorm2d,
+   ReLU,
+   Sigmoid,
+   Tanh,
+   LeakyReLU,
+   ELU,
+   Softmax,
+   Sequential,
+
+   // Loss classes
+   MSELoss,
+   CrossEntropyLoss,
+   BCELoss,
+
+   // Optimizer classes
+   SGD,
+   Adam,
+   RMSprop,
+
+   // Factory functions
+   models,
+   optimizers,
+   losses,
+   layers,
+   tensors,
+   utils
+ };
+
+ // Default export
+ export default {
+   Tensor,
+   Model,
+   Trainer,
+   Linear,
+   Dense,
+   Conv2d,
+   MaxPool2d,
+   AvgPool2d,
+   Flatten,
+   Dropout,
+   BatchNorm2d,
+   ReLU,
+   Sigmoid,
+   Tanh,
+   LeakyReLU,
+   ELU,
+   Softmax,
+   Sequential,
+   MSELoss,
+   CrossEntropyLoss,
+   BCELoss,
+   SGD,
+   Adam,
+   RMSprop,
+   models,
+   optimizers,
+   losses,
+   layers,
+   tensors,
+   utils
+ };
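With the rewritten entry point, consumers can use either named or default imports. A minimal sketch, assuming the package is installed as `mini-jstorch` and run under ESM (the package sets `"type": "module"`), and that these classes follow the same API as the `MainEngine.js` ones used in `hh.js`:

```js
import { Sequential, Linear, ReLU, MSELoss, SGD } from 'mini-jstorch';

// ...or everything at once through the default export:
import torch from 'mini-jstorch';

const model = new Sequential([new Linear(2, 4), new ReLU(), new Linear(4, 1)]);
const lossFn = new MSELoss();
const optimizer = new SGD(model.parameters(), 0.01);
```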
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "mini-jstorch",
- "version": "1.3.0",
+ "version": "1.3.2",
  "type": "module",
  "description": "A lightweight JavaScript neural network library for rapid frontend AI experimentation on low-resource devices, inspired by PyTorch.",
  "main": "index.js",
package/src/MainEngine.js CHANGED
@@ -1,5 +1,8 @@
- // MINI JSTORCH ENGINE - MAJOR ULTRA OPTIMIZED 1.2.3
+
+ // MINI JSTORCH ENGINE
  // LICENSE: MIT (C) Rizal 2025
+ // ENGINE VERSION: 1.2.3
+ // IMPORTANT: CORE ENGINE. Do not edit this file; it is very sensitive and changes can crash the engine.
 
  // ---------------------- Utilities ----------------------
  function zeros(rows, cols) { return Array.from({length:rows},()=>Array(cols).fill(0)); }
@@ -222,8 +225,310 @@ export class Adam{
    }
  }
 
+ // ---------------------- ELU Activation ----------------------
+ export class ELU {
+   constructor(alpha = 1.0) {
+     this.alpha = alpha;
+     this.out = null;
+   }
+
+   forward(x) {
+     this.out = x.map(row =>
+       row.map(v => v > 0 ? v : this.alpha * (Math.exp(v) - 1))
+     );
+     return this.out;
+   }
+
+   backward(grad) {
+     // For v <= 0 the output is alpha*(e^v - 1), so ELU'(v) = alpha*e^v = out + alpha.
+     return grad.map((row, i) =>
+       row.map((v, j) =>
+         v * (this.out[i][j] > 0 ? 1 : this.out[i][j] + this.alpha)
+       )
+     );
+   }
+ }
+
+ // ---------------------- Mish Activation ----------------------
+ export class Mish {
+   constructor() {
+     this.x = null;
+   }
+
+   forward(x) {
+     this.x = x;
+     return x.map(row =>
+       row.map(v => {
+         // Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x))
+         const softplus = Math.log(1 + Math.exp(v));
+         return v * Math.tanh(softplus);
+       })
+     );
+   }
+
+   backward(grad) {
+     return grad.map((row, i) =>
+       row.map((v, j) => {
+         const x_val = this.x[i][j];
+
+         // Closed-form gradient of Mish:
+         // Mish'(x) = e^x * omega / delta^2
+         // where omega = 4(x+1) + 4e^{2x} + e^{3x} + e^x(4x+6)
+         //   and delta = 2e^x + e^{2x} + 2
+         const exp_x = Math.exp(x_val);
+         const exp_2x = Math.exp(2 * x_val);
+         const exp_3x = Math.exp(3 * x_val);
+
+         const numerator = 4 * (x_val + 1) + 4 * exp_2x + exp_3x + exp_x * (4 * x_val + 6);
+         const denominator = Math.pow(2 * exp_x + exp_2x + 2, 2);
+
+         const mish_grad = exp_x * (numerator / denominator);
+         return v * mish_grad;
+       })
+     );
+   }
+ }
+
+ // ---------------------- SiLU Activation ----------------------
+ export class SiLU {
+   constructor() {
+     this.x = null;
+   }
+
+   forward(x) {
+     this.x = x;
+     return x.map(row =>
+       row.map(v => v / (1 + Math.exp(-v))) // x * sigmoid(x)
+     );
+   }
+
+   backward(grad) {
+     // SiLU'(x) = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
+     return grad.map((row, i) =>
+       row.map((v, j) => {
+         const x_val = this.x[i][j];
+         const sigmoid = 1 / (1 + Math.exp(-x_val));
+         return v * (sigmoid * (1 + x_val * (1 - sigmoid)));
+       })
+     );
+   }
+ }
+
  export class SGD{ constructor(params,lr=0.01){ this.params=params; this.lr=lr; } step(){ this.params.forEach(p=>{ for(let i=0;i<p.param.length;i++) for(let j=0;j<(p.param[0].length||1);j++) p.param[i][j]-=this.lr*p.grad[i][j]; }); } }
 
+ // ---------------------- BatchNorm2D ----------------------
+ export class BatchNorm2d {
+   constructor(numFeatures, eps = 1e-5, momentum = 0.1, affine = true) {
+     this.numFeatures = numFeatures;
+     this.eps = eps;
+     this.momentum = momentum;
+     this.affine = affine;
+
+     // Learnable parameters (scale and shift)
+     if (affine) {
+       this.weight = Array(numFeatures).fill(1);
+       this.bias = Array(numFeatures).fill(0);
+       this.gradWeight = Array(numFeatures).fill(0);
+       this.gradBias = Array(numFeatures).fill(0);
+     }
+
+     // Running statistics
+     this.runningMean = Array(numFeatures).fill(0);
+     this.runningVar = Array(numFeatures).fill(1);
+
+     // Training state
+     this.training = true;
+     this.x = null;
+     this.xCentered = null; // per-sample centered inputs: [batch][channel] matrices
+     this.std = null;
+   }
+
+   forward(x) {
+     // x shape: [batch, channels, height, width]
+     this.x = x;
+     const batchSize = x.length;
+     const channels = x[0].length;
+
+     if (this.training) {
+       // Mean per channel, averaged over batch and spatial dimensions
+       const means = Array(channels).fill(0);
+       for (let b = 0; b < batchSize; b++) {
+         for (let c = 0; c < channels; c++) {
+           const channelData = x[b][c];
+           let sum = 0;
+           for (let i = 0; i < channelData.length; i++) {
+             for (let j = 0; j < channelData[0].length; j++) {
+               sum += channelData[i][j];
+             }
+           }
+           means[c] += sum / (channelData.length * channelData[0].length);
+         }
+       }
+       means.forEach((_, c) => means[c] /= batchSize);
+
+       // Variance per channel
+       const variances = Array(channels).fill(0);
+       for (let b = 0; b < batchSize; b++) {
+         for (let c = 0; c < channels; c++) {
+           const channelData = x[b][c];
+           let sum = 0;
+           for (let i = 0; i < channelData.length; i++) {
+             for (let j = 0; j < channelData[0].length; j++) {
+               sum += Math.pow(channelData[i][j] - means[c], 2);
+             }
+           }
+           variances[c] += sum / (channelData.length * channelData[0].length);
+         }
+       }
+       variances.forEach((_, c) => variances[c] /= batchSize);
+
+       // Update running statistics
+       for (let c = 0; c < channels; c++) {
+         this.runningMean[c] = this.momentum * means[c] + (1 - this.momentum) * this.runningMean[c];
+         this.runningVar[c] = this.momentum * variances[c] + (1 - this.momentum) * this.runningVar[c];
+       }
+
+       // Normalize, caching per-sample centered inputs for backward
+       this.xCentered = [];
+       this.std = Array(channels).fill(0).map(() => []);
+
+       const output = [];
+       for (let b = 0; b < batchSize; b++) {
+         const batchOut = [];
+         const batchCentered = [];
+         for (let c = 0; c < channels; c++) {
+           const channelData = x[b][c];
+           const channelOut = zeros(channelData.length, channelData[0].length);
+           const channelCentered = zeros(channelData.length, channelData[0].length);
+           const channelStd = Math.sqrt(variances[c] + this.eps);
+           this.std[c].push(channelStd);
+
+           for (let i = 0; i < channelData.length; i++) {
+             for (let j = 0; j < channelData[0].length; j++) {
+               channelCentered[i][j] = channelData[i][j] - means[c];
+               channelOut[i][j] = channelCentered[i][j] / channelStd;
+
+               // Apply affine transformation if enabled
+               if (this.affine) {
+                 channelOut[i][j] = channelOut[i][j] * this.weight[c] + this.bias[c];
+               }
+             }
+           }
+
+           batchOut.push(channelOut);
+           batchCentered.push(channelCentered);
+         }
+         output.push(batchOut);
+         this.xCentered.push(batchCentered);
+       }
+
+       return output;
+     } else {
+       // Inference mode: use running statistics
+       const output = [];
+       for (let b = 0; b < batchSize; b++) {
+         const batchOut = [];
+         for (let c = 0; c < channels; c++) {
+           const channelData = x[b][c];
+           const channelOut = zeros(channelData.length, channelData[0].length);
+           const channelStd = Math.sqrt(this.runningVar[c] + this.eps);
+
+           for (let i = 0; i < channelData.length; i++) {
+             for (let j = 0; j < channelData[0].length; j++) {
+               channelOut[i][j] = (channelData[i][j] - this.runningMean[c]) / channelStd;
+
+               // Apply affine transformation if enabled
+               if (this.affine) {
+                 channelOut[i][j] = channelOut[i][j] * this.weight[c] + this.bias[c];
+               }
+             }
+           }
+
+           batchOut.push(channelOut);
+         }
+         output.push(batchOut);
+       }
+
+       return output;
+     }
+   }
+
+   backward(gradOutput) {
+     if (!this.training) {
+       throw new Error("Backward should only be called in training mode");
+     }
+
+     const batchSize = gradOutput.length;
+     const channels = gradOutput[0].length;
+
+     // Initialize input gradients
+     const gradInput = this.x.map(batch =>
+       batch.map(channel =>
+         zeros(channel.length, channel[0].length)
+       )
+     );
+
+     if (this.affine) {
+       this.gradWeight.fill(0);
+       this.gradBias.fill(0);
+     }
+
+     for (let c = 0; c < channels; c++) {
+       let sumGradWeight = 0;
+       let sumGradBias = 0;
+
+       for (let b = 0; b < batchSize; b++) {
+         const channelGrad = gradOutput[b][c];
+
+         // Gradients for scale and shift
+         if (this.affine) {
+           for (let i = 0; i < channelGrad.length; i++) {
+             for (let j = 0; j < channelGrad[0].length; j++) {
+               sumGradBias += channelGrad[i][j];
+               sumGradWeight += channelGrad[i][j] * (this.xCentered[b][c][i][j] / this.std[c][b]);
+             }
+           }
+         }
+
+         // Input gradient. Note: this treats the batch mean and variance as
+         // constants, a simplification of the full BatchNorm backward pass.
+         const stdInv = 1 / this.std[c][b];
+
+         for (let i = 0; i < channelGrad.length; i++) {
+           for (let j = 0; j < channelGrad[0].length; j++) {
+             let grad = channelGrad[i][j];
+
+             if (this.affine) {
+               grad *= this.weight[c];
+             }
+
+             grad *= stdInv;
+             gradInput[b][c][i][j] = grad;
+           }
+         }
+       }
+
+       if (this.affine) {
+         this.gradWeight[c] = sumGradWeight / batchSize;
+         this.gradBias[c] = sumGradBias / batchSize;
+       }
+     }
+
+     return gradInput;
+   }
+
+   parameters() {
+     if (!this.affine) return [];
+     return [
+       { param: [this.weight], grad: [this.gradWeight] },
+       { param: [this.bias], grad: [this.gradBias] }
+     ];
+   }
+
+   train() { this.training = true; }
+   eval() { this.training = false; }
+ }
+
  // ---------------------- Model Save/Load ----------------------
  export function saveModel(model){
  if(!(model instanceof Sequential)) throw new Error("saveModel supports only Sequential");
@@ -252,4 +557,4 @@ export function reshape(tensor, rows, cols) {
  flat.slice(i*cols, i*cols + cols)
  );
  return out;
- }
+ }
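A minimal sketch exercising the new activation classes, assuming they are imported directly from the engine file; inputs are plain 2-D arrays (`[batch][features]`), as the `row.map` implementations imply:

```js
import { ELU, Mish, SiLU } from './src/MainEngine.js';

const x = [[-1.0, 0.0, 2.0]];        // one sample, three features
const upstream = [[1, 1, 1]];        // pretend dL/dout is all ones

const elu = new ELU(1.0);
console.log(elu.forward(x));         // [[-0.6321..., 0, 2]]
console.log(elu.backward(upstream)); // elementwise ELU'(x): [[0.3678..., 1, 1]]

console.log(new SiLU().forward(x));  // [[-0.2689..., 0, 1.7615...]]
console.log(new Mish().forward(x));  // [[-0.3034..., 0, 1.9439...]]
```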
package/src/startup.cpu CHANGED
@@ -1,12 +1,15 @@
  // you can delete this file; it is not important for the engine runtime.
 
  e=run=[cpu[runtime]]
- e.set.runtime('beta')
- e.rnt()
- e.set()
- e.register('vanilla',expe='Experiments.js',main='MainEngine.js')
- l=e.prog('asm')
+ devices=e.getdata[devices[5]]
+ env.set.runtime('beta')
+ env.rnt()
+ env.set()
+ env.register('vanilla',expe='Experiments.js',main='MainEngine.js',tgver=latest)
+ resources=e.find(tag='resources')
+ resources.ld(env)
+ l=env.prog('asm')
  r=l.gv=[0xCAFEBABE]
- eng=e.load(register,r,'vanilla')
- eng.boot(e,r,'dp')
- eng.load()
+ eng=env.load(register,r,'vanilla')
+ eng.boot(env,r,'dp')
+ eng.load(resources,runtime,devices)
package/src/Dummy/debug_train.js ADDED
@@ -0,0 +1,127 @@
+ // src/Dummy/debug_train.js
+ import {
+   Tensor,
+   Linear,
+   Sequential,
+   ReLU,
+   Sigmoid,
+   CrossEntropyLoss,
+   Adam,
+   MathOps
+ } from '../src/Dummy/exp.js';
+
+ // ---------------------- Simple Debug Data ----------------------
+ function generateSimpleData() {
+   // VERY simple data: 4 points in 2D
+   const X = [
+     [0, 0],
+     [0, 1],
+     [1, 0],
+     [1, 1]
+   ];
+
+   const y = [
+     [0], // AND operation
+     [0],
+     [0],
+     [1]
+   ];
+
+   return { X, y };
+ }
+
+ // ---------------------- Debug Model ----------------------
+ function createDebugModel() {
+   return new Sequential([
+     new Linear(2, 2), // Small hidden layer
+     new ReLU(),
+     new Linear(2, 1), // Output layer
+     new Sigmoid()
+   ]);
+ }
+
+ // ---------------------- Debug Training ----------------------
+ function debugTraining() {
+   console.log("🔍 DEBUG TRAINING STARTED");
+   console.log("=========================");
+
+   const data = generateSimpleData();
+   const model = createDebugModel();
+   const lossFunction = new CrossEntropyLoss();
+   const parameters = model.parameters();
+   // Create the optimizer once, outside the step loop, so Adam's moment
+   // estimates persist across updates.
+   const optimizer = new Adam(parameters, 0.1);
+
+   console.log("Model parameters:", parameters.length);
+   console.log("Data samples:", data.X.length);
+
+   // Step-by-step debug
+   for (let step = 0; step < 10; step++) {
+     console.log(`\n--- Step ${step} ---`);
+
+     // Forward pass
+     const predictions = model.forward(data.X);
+     console.log("Predictions:", predictions.map(p => p[0].toFixed(3)));
+
+     const loss = lossFunction.forward(predictions, data.y);
+     console.log("Loss:", loss);
+
+     if (isNaN(loss)) {
+       console.log("❌ NaN LOSS DETECTED!");
+       console.log("Predictions:", predictions);
+       console.log("Targets:", data.y);
+       break;
+     }
+
+     // Backward pass
+     const grad = lossFunction.backward();
+     console.log("Gradient:", grad.map(g => g[0].toFixed(3)));
+
+     model.backward(grad);
+
+     // Check gradients
+     console.log("Parameter gradients:");
+     parameters.forEach((param, idx) => {
+       if (Array.isArray(param.grad[0])) {
+         console.log(`  Param ${idx} grad:`, param.grad.map(row =>
+           row.map(v => v.toFixed(3))
+         ));
+       } else {
+         console.log(`  Param ${idx} grad:`, param.grad.map(v => v.toFixed(3)));
+       }
+     });
+
+     // Update weights
+     optimizer.step();
+
+     // Reset gradients manually
+     parameters.forEach(param => {
+       if (Array.isArray(param.grad[0])) {
+         for (let i = 0; i < param.grad.length; i++) {
+           for (let j = 0; j < param.grad[0].length; j++) {
+             param.grad[i][j] = 0;
+           }
+         }
+       } else {
+         for (let i = 0; i < param.grad.length; i++) {
+           param.grad[i] = 0;
+         }
+       }
+     });
+
+     // Calculate accuracy
+     const accuracy = calculateAccuracy(predictions, data.y);
+     console.log("Accuracy:", (accuracy * 100).toFixed(1) + "%");
+   }
+ }
+
+ function calculateAccuracy(predictions, targets) {
+   let correct = 0;
+   for (let i = 0; i < predictions.length; i++) {
+     const predLabel = predictions[i][0] > 0.5 ? 1 : 0;
+     if (predLabel === targets[i][0]) correct++;
+   }
+   return correct / predictions.length;
+ }
+
+ // Run debug
+ debugTraining();
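The manual gradient reset in the loop above is a natural candidate for a helper. A hypothetical `zeroGrad` sketch (not part of the package) written against the same `{ param, grad }` parameter shape:

```js
// Hypothetical helper, not part of mini-jstorch: zero every gradient buffer
// in a list of { param, grad } records as returned by model.parameters().
function zeroGrad(parameters) {
  for (const p of parameters) {
    if (Array.isArray(p.grad[0])) {
      for (const row of p.grad) row.fill(0); // 2-D weight gradients
    } else {
      p.grad.fill(0);                        // 1-D bias gradients
    }
  }
}

// Usage inside the training step, replacing the manual loops:
// zeroGrad(parameters);
```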