mini-jstorch 1.1.1 → 1.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +23 -65
- package/engine/Dummy/msg +6 -0
- package/engine/MainEngine.js +148 -0
- package/engine/startup.cpu +8 -0
- package/index.js +1 -12
- package/package.json +1 -1
- package/act/linear.js +0 -8
- package/act/relu.js +0 -10
- package/layers/dense.js +0 -28
- package/layers/dropout.js +0 -15
- package/layers/flatten.js +0 -14
- package/models/Seq.js +0 -25
- package/optim/sgd.js +0 -12
- package/train/loop.js +0 -28
- package/utils/io.js +0 -28
- package/utils/math.js +0 -18
- package/utils/tensor.js +0 -13
package/README.md
CHANGED
@@ -1,76 +1,34 @@
-
+## Introducing Mini-JSTorch

-
-
+**Mini-JSTorch** is a lightweight JavaScript module inspired by **PyTorch**, designed to run on any device, frontend or backend. With this release, Mini-JSTorch is finally versioned and no longer *BETA*.
+For now the module is only frontend-friendly and may not run well on the GPU or in the backend. We will keep updating it until it is stable on the backend and GPU as well.

-## Overview
-*mini-jstorch BETA* module is a lightweight, **PyTorch-inspired** neural network library in JavaScript. It provides basic components to build, train, and optimize neural networks
-and are not heavy.

-
-- Sequential model container (`Seq`) for easy stacking of layers.
-- Common layers: `Dense`, `Dropout`, `Flatten`.
-- Activations: `relu`, `sigmoid`, `leakyRelu`, `tanh`, `linear`.
-- Tensor utilities (`zeros`, `rand`, `shape`) and math helpers (`dot`, `add`).
-- Custom SGD optimizer.
-- Training loop with batch and epoch support.
-- Save/load models with JSON.
+---

-##
-```
-module-root/          # Only placeholder not an real module root name
-│
-├── index.js          # Main exports
-├── models/           # Model containers
-│   └── Seq.js        # Sequential model
-├── layers/           # Neural network layers
-│   ├── dense.js      # Fully connected layer
-│   ├── dropout.js    # Dropout layer
-│   └── flatten.js    # Flatten input layer
-├── act/              # Activation functions
-│   ├── linear.js     # Additional activations (leakyRelu, tanh)
-│   └── relu.js       # ReLU, Sigmoid
-├── utils/            # Utilities
-│   ├── io.js         # Save/load models
-│   ├── math.js       # Math helpers (dot, add, etc.)
-│   └── tensor.js     # Tensor helpers
-├── train/            # Training loop
-│   └── loop.js
-└── optim/            # Optimizers
-    └── sgd.js        # Custom SGD optimizer
-```
+## Key Features

-
--
--
--
-- Added `Flatten` and `Dropout` layers.
-- Implemented batch prediction support in `Seq` model.
-- Added basic SGD optimizer in `optim/`.
-- Introduced training loop in `train/loop.js`.
-- Expanded utils: tensor creation (`zeros`, `rand`), math operations (`dotBatch`, `addScalar`).
-- Enhanced model IO with activations mapping and optional pretty-print.
-- Added comments throughout files for better clarity and professional documentation.
+- **Custom Model Creation:** Configure layers, neurons, learning rate, and more.
+- **Manual & Auto Gradient:** Toggle gradient computation mode per experiment.
+- **Sequence Handling:** Supports input sequences for NLP or time series tasks.
+- **Lightweight & Frontend-Friendly:** Pure JS implementation, no GPU dependency.

-
-```javascript
-import { Seq } from './models/Seq.js';
-import { Dense } from './layers/dense.js';
-import * as act from './act/linear.js';
-import { SGD } from './optim/sgd.js';
-import { train } from './train/loop.js';
+---

-
-model.add(new Dense(3, 5, act.leakyRelu));
-model.add(new Dense(5, 1));
+## Installation

-
-
-train(model, [[1,2,3]], [[1]], (pred, y) => pred[0]-y[0], optimizer, 10);
-model.summary();
+```bash
+npm install mini-jstorch
 ```

+## Patch v1.1.7
+
+- *ONLY* adds a README.md, which was forgotten in the previous release.
+
+---
+
 ## Notes
-
--
--
+
+- **Once more: this module will not run *well* on the GPU or in the backend.**
+- **The module's features follow an engine design very close to *PyTorch*'s.**
+- **If the file 'startup.cpu' looks confusing because its syntax matches no programming language: it is most likely used for booting the Engine.**
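The new README drops the old usage example without a replacement. Based on the engine shipped in this version (`engine/MainEngine.js`, shown below), a minimal quick-start might look like the following sketch. Both the import path and the named exports are assumptions: MainEngine.js as published does not export its classes, so you would first need to add `export { Seq, Dense };` (or copy the classes into your own module).

```javascript
// Hypothetical quick-start; assumes Seq and Dense were exported from engine/MainEngine.js.
import { Seq, Dense } from 'mini-jstorch/engine/MainEngine.js';

// 2 inputs -> 8 hidden units (ReLU) -> 1 linear output.
const model = new Seq([
  new Dense(2, 8, 'relu'),
  new Dense(8, 1, 'linear')
]);

// Fit y = x0 + x1 on three samples: 200 epochs, learning rate 0.01.
model.train([[1, 2], [2, 3], [3, 4]], [[3], [5], [7]], 200, 0.01);

console.log(model.predict([[4, 5]])); // expected to approach [[9]]
```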
package/engine/Dummy/msg
ADDED
@@ -0,0 +1,6 @@
+**File Unavailable**
+This file has been restricted by the developer.
+
+It is intended solely for experimental purposes in future versions, including optimizations and bug fixes before full integration into the Main Engine.
+
+For upcoming updates, this file may contain critical system components. Current usage may result in errors or malfunction, and it is not supported at this time.
package/engine/MainEngine.js
ADDED
@@ -0,0 +1,148 @@
+// ================================
+// MINI JS AI ENGINE v1
+// ================================
+
+// Basic tensor ops
+const zeros = n => Array(n).fill(0);
+const randn = () => (Math.random() * 2 - 1) * 0.1; // uniform in [-0.1, 0.1], not a true Gaussian
+
+const dot = (a,b) => a.map(row=>b[0].map((_,j)=>row.reduce((sum,v,k)=>sum+v*b[k][j],0)));
+const add = (a,b) => a.map((row,i)=>row.map((v,j)=>v+b[i][j]));
+const sub = (a,b) => a.map((row,i)=>row.map((v,j)=>v-b[i][j]));
+const mulScalar = (a,s) => a.map(row=>row.map(v=>v*s));
+const transpose = m => m[0].map((_,i)=>m.map(row=>row[i]));
+
+// Activations
+const Activations = {
+  relu: x => x.map(v=>Math.max(0,v)),
+  linear: x => x,
+  leakyRelu: (x, alpha=0.01) => x.map(v=>v>0?v:alpha*v)
+};
+const dActivations = {
+  relu: x => x.map(v=>v>0?1:0),
+  linear: x => x.map(_=>1),
+  leakyRelu: (x, alpha=0.01) => x.map(v=>v>0?1:alpha)
+};
+
+// Dense layer with manual grad & auto-grad
+class Dense {
+  constructor(inputSize, outputSize, activation='linear'){
+    this.inputSize = inputSize;
+    this.outputSize = outputSize;
+    this.activation = activation;
+    this.W = Array.from({length:inputSize},()=>Array.from({length:outputSize},()=>randn()*Math.sqrt(2/inputSize)));
+    this.b = Array(outputSize).fill(0);
+
+    // Adam variables
+    this.mW = mulScalar(this.W,0);
+    this.vW = mulScalar(this.W,0);
+    this.mb = Array(outputSize).fill(0);
+    this.vb = Array(outputSize).fill(0);
+    this.lastInput = null;
+    this.lastOutput = null;
+  }
+
+  forward(X){
+    this.lastInput = X;
+    let output = dot(X,this.W);
+    output = output.map((row,i)=>row.map((v,j)=>v+this.b[j]));
+    this.lastOutput = output.map(row => Activations[this.activation](row));
+    return this.lastOutput;
+  }
+
+  backward(dLoss, lr=0.001, beta1=0.9, beta2=0.999, t=1){
+    // Per-sample local gradient dLoss ⊙ activation'(output), kept 2-D so each batch row uses its own slice
+    const dOut = this.lastOutput.map((row,k)=>
+      dActivations[this.activation](row).map((g,j)=>g*dLoss[k][j]));
+
+    const gradW = Array.from({length:this.inputSize},()=>Array(this.outputSize).fill(0));
+    const gradB = Array(this.outputSize).fill(0);
+
+    for(let k=0;k<this.lastInput.length;k++){
+      for(let i=0;i<this.inputSize;i++){
+        for(let j=0;j<this.outputSize;j++){
+          gradW[i][j] += this.lastInput[k][i]*dOut[k][j]/this.lastInput.length;
+        }
+      }
+    }
+    for(let j=0;j<this.outputSize;j++) for(let k=0;k<this.lastInput.length;k++) gradB[j] += dOut[k][j]/this.lastInput.length;
+
+    // Adam update with bias correction
+    for(let i=0;i<this.inputSize;i++){
+      for(let j=0;j<this.outputSize;j++){
+        this.mW[i][j] = beta1*this.mW[i][j]+(1-beta1)*gradW[i][j];
+        this.vW[i][j] = beta2*this.vW[i][j]+(1-beta2)*gradW[i][j]*gradW[i][j];
+        const mHat = this.mW[i][j]/(1-Math.pow(beta1,t));
+        const vHat = this.vW[i][j]/(1-Math.pow(beta2,t));
+        this.W[i][j] -= lr*mHat/(Math.sqrt(vHat)+1e-8);
+      }
+    }
+
+    for(let j=0;j<this.outputSize;j++){
+      this.mb[j] = beta1*this.mb[j]+(1-beta1)*gradB[j];
+      this.vb[j] = beta2*this.vb[j]+(1-beta2)*gradB[j]*gradB[j];
+      const mHat = this.mb[j]/(1-Math.pow(beta1,t));
+      const vHat = this.vb[j]/(1-Math.pow(beta2,t));
+      this.b[j] -= lr*mHat/(Math.sqrt(vHat)+1e-8);
+    }
+
+    // Return dLoss for next layer (manual grad)
+    const dNext = Array(this.lastInput.length).fill(0).map(_=>Array(this.inputSize).fill(0));
+    for(let i=0;i<this.inputSize;i++){
+      for(let j=0;j<this.outputSize;j++){
+        for(let k=0;k<this.lastInput.length;k++){
+          dNext[k][i] += dOut[k][j]*this.W[i][j];
+        }
+      }
+    }
+    return dNext;
+  }
+}
+
+// Sequential model
+class Seq {
+  constructor(layers){
+    this.layers = layers;
+    this.logs = [];
+  }
+
+  forward(X){
+    return this.layers.reduce((inp,layer)=>layer.forward(inp), X);
+  }
+
+  train(X,y,epochs=200, lr=0.001){
+    for(let epoch=1;epoch<=epochs;epoch++){
+      const yPred = this.forward(X);
+      const dLoss = yPred.map((row,i)=>row.map((v,j)=>2*(v - y[i][j])/y.length)); // d(MSE)/d(prediction)
+
+      let grad = dLoss;
+      for(let l=this.layers.length-1;l>=0;l--){
+        grad = this.layers[l].backward(grad, lr, 0.9, 0.999, epoch);
+      }
+
+      const loss = yPred.map((row,i)=>row.map((v,j)=>Math.pow(v - y[i][j],2))).flat().reduce((a,b)=>a+b,0)/y.length;
+      if(epoch%50===0) console.log(`Epoch ${epoch}, Loss ${loss.toFixed(4)}, Pred sample ${yPred[0][0].toFixed(2)}`);
+    }
+  }
+
+  predict(X){
+    return this.forward(X);
+  }
+}
+
+// ================================
+// EXAMPLE USAGE
+// ================================
+
+const model = new Seq([
+  new Dense(2,16,'relu'),
+  new Dense(16,12,'relu'),
+  new Dense(12,1,'linear')
+]);
+
+const X = [[1,2],[2,3],[3,4]];
+const y = [[3],[5],[7]];
+
+model.train(X,y,200,0.01);
+
+console.log('Prediction [7,8]:', model.predict([[7,8]]));
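Since `Dense.backward` both applies the Adam step and hand-computes the upstream gradient, a finite-difference check is a useful sanity test. The sketch below is not part of the package; it assumes the `Dense` class above is in scope and passes `lr = 0` so the check leaves the weights untouched (the Adam moment buffers still advance, harmlessly).

```javascript
// Finite-difference check of the input gradient returned by Dense.backward.
const layer = new Dense(3, 2, 'leakyRelu');
const X = [[0.5, -0.2, 0.1]];

// Scalar loss L = sum of all outputs, so dL/d(output) is a matrix of ones.
const L = A => layer.forward(A).flat().reduce((s, v) => s + v, 0);

L(X);                                         // forward pass caches lastInput/lastOutput
const analytic = layer.backward([[1, 1]], 0); // lr = 0: weights stay fixed

const eps = 1e-5;
X[0].forEach((_, i) => {
  const bump = d => [X[0].map((u, k) => (k === i ? u + d : u))];
  const numeric = (L(bump(eps)) - L(bump(-eps))) / (2 * eps);
  console.log(`dL/dx${i}: analytic=${analytic[0][i].toFixed(6)} numeric=${numeric.toFixed(6)}`);
});
```

Note that `dActivations` is evaluated on the cached post-activation output rather than the pre-activation; for `relu`, `leakyRelu`, and `linear` the sign (and hence the derivative) is unchanged by that substitution, so the check passes for exactly the activations this engine ships.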
package/index.js
CHANGED
@@ -1,17 +1,6 @@
-//
-// Main entry point for mini-jstorch
-
-// Core model
+// Entry point of the library, export main classes and functions
 export { Seq } from './models/seq.js';
-
-// Layers
 export { Dense } from './layers/dense.js';
-
-// Activations
 export * as act from './act/linear.js';
-
-// Optimizers
 export { SGD } from './optim/sgd.js';
-
-// Training loop
 export { train } from './train/loop.js';
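For reference, a consumer would use the surface index.js advertises roughly as in the hedged sketch below. Two caveats grounded in this very diff: the files behind these exports (models/Seq.js, layers/dense.js, act/linear.js, act/relu.js, optim/sgd.js, train/loop.js) are all deleted in 1.1.7, so the re-exports cannot resolve; and index.js imports './models/seq.js' while the deleted file was named 'models/Seq.js', which would have broken on case-sensitive filesystems even before the deletion.

```javascript
// Hypothetical consumer of the advertised entry point (will not resolve in 1.1.7; see note above).
import { Seq, Dense, act, SGD, train } from 'mini-jstorch';

const model = new Seq();
model.add(new Dense(3, 4, act.leakyRelu));
console.log(model.predict([1, 2, 3]));
```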
package/package.json
CHANGED
package/act/linear.js
DELETED
package/act/relu.js
DELETED
package/layers/dense.js
DELETED
|
@@ -1,28 +0,0 @@
-import { dot, add } from '../utils/math.js';
-import { leakyRelu } from '../act/linear.js';
-
-// Fully connected layer
-export class Dense {
-  constructor(inputSize, outputSize, activation = x => x) {
-    this.weights = Array.from({ length: outputSize }, () =>
-      Array.from({ length: inputSize }, () => Math.random() * 0.1)
-    );
-    this.bias = Array(outputSize).fill(0);
-    this.activation = activation;
-    this.name = "Dense";
-  }
-
-  forward(input) {
-    const z = add(dot(this.weights, input), this.bias);
-    return z.map(this.activation);
-  }
-
-  forwardBatch(inputs) {
-    return inputs.map(input => this.forward(input));
-  }
-
-  forwardLeaky(input) {
-    const z = add(dot(this.weights, input), this.bias);
-    return leakyRelu(z);
-  }
-}
package/layers/dropout.js
DELETED
|
@@ -1,15 +0,0 @@
-// Dropout layer for regularization
-export class Dropout {
-  constructor(p = 0.5) {
-    this.p = p;
-    this.name = "Dropout";
-  }
-
-  forward(input) {
-    return input.map(v => (Math.random() < this.p ? 0 : v / (1 - this.p)));
-  }
-
-  forwardBatch(inputs) {
-    return inputs.map(inp => this.forward(inp));
-  }
-}
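The deleted Dropout implemented "inverted dropout": each unit is zeroed with probability p and the survivors are scaled by 1/(1-p), so the expected activation is unchanged at training time. A quick illustrative check of that property (not package code):

```javascript
// E[output] stays approximately equal to the input under inverted dropout.
const p = 0.5, v = 1.0, trials = 1_000_000;
let sum = 0;
for (let i = 0; i < trials; i++) {
  sum += Math.random() < p ? 0 : v / (1 - p);
}
console.log((sum / trials).toFixed(3)); // ≈ 1.000
```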
package/layers/flatten.js
DELETED
package/models/Seq.js
DELETED
|
@@ -1,25 +0,0 @@
-// Sequential model container
-export class Seq {
-  constructor() {
-    this.layers = [];
-  }
-
-  add(layer) {
-    this.layers.push(layer);
-  }
-
-  predict(input) {
-    return this.layers.reduce((out, layer) => layer.forward(out), input);
-  }
-
-  predictBatch(inputs) {
-    return inputs.map(input => this.predict(input));
-  }
-
-  summary() {
-    console.log("Model summary:");
-    this.layers.forEach((l, i) => {
-      console.log(`  Layer ${i+1}: ${l.name}, outputSize: ${l.weights ? l.weights.length : 'N/A'}`);
-    });
-  }
-}
package/optim/sgd.js
DELETED
package/train/loop.js
DELETED
|
@@ -1,28 +0,0 @@
-// Training loop
-export function train(model, X, Y, lossFn, optimizer, epochs=None) {
-  for(let e=0; e<epochs; e++) {
-    let totalLoss = 0;
-
-    for(let i=0; i<X.length; i++) {
-      const x = X[i];
-      const y = Y[i];
-
-      // Forward pass
-      const pred = model.predict(x);
-
-      // Compute loss
-      const loss = lossFn(pred, y);
-      totalLoss += loss;
-
-      // Naive gradient (placeholder)
-      const grads = pred.map((p,j) => [p - y[j]]);
-
-      // Update first Dense layer as example
-      if(model.layers[0].weights) {
-        optimizer.step([model.layers[0].weights], grads);
-      }
-    }
-
-    console.log(`Epoch ${e+1}/${epochs}, Loss: ${totalLoss/X.length}`);
-  }
-}
package/utils/io.js
DELETED
|
@@ -1,28 +0,0 @@
-// Save and load model
-export function saveModel(model, pretty=false) {
-  const data = {
-    layers: model.layers.map(l => ({
-      name: l.name,
-      weights: l.weights,
-      bias: l.bias,
-      activation: l.activation?.name || null
-    }))
-  };
-  return pretty ? JSON.stringify(data, null, 2) : JSON.stringify(data);
-}
-
-export function loadModel(json, ModelClass, LayerClass, actMap={}) {
-  const data = typeof json === 'string' ? JSON.parse(json) : json;
-  const model = new ModelClass();
-
-  data.layers.forEach(ld => {
-    const l = new LayerClass(0,0);
-    l.weights = ld.weights;
-    l.bias = ld.bias;
-    l.name = ld.name;
-    if(ld.activation && actMap[ld.activation]) l.activation = actMap[ld.activation];
-    model.add(l);
-  });
-
-  return model;
-}
package/utils/math.js
DELETED
|
@@ -1,18 +0,0 @@
-// Basic math utils
-export function dot(weights, input) {
-  return weights.map(row =>
-    row.reduce((sum, val, i) => sum + val * input[i], 0)
-  );
-}
-
-export function add(a, b) {
-  return a.map((v, i) => v + b[i]);
-}
-
-export function dotBatch(weights, inputs) {
-  return inputs.map(input => dot(weights, input));
-}
-
-export function addScalar(arr, scalar) {
-  return arr.map(v => v + scalar);
-}
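Worth noting: this deleted dot() was a matrix–vector product (each weight row dotted with a flat input vector), whereas the new engine's dot is matrix–matrix over batches. A small illustration, not package code:

```javascript
// Matrix–vector product as the deleted utils/math.js defined it.
const dot = (weights, input) =>
  weights.map(row => row.reduce((sum, val, i) => sum + val * input[i], 0));

console.log(dot([[1, 2], [3, 4]], [10, 20])); // [50, 110]
```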
package/utils/tensor.js
DELETED
|
@@ -1,13 +0,0 @@
-// Tensor helpers
-export function zeros(size) {
-  return Array(size).fill(0);
-}
-
-export function rand(size, scale=0.1) {
-  return Array(size).fill(0).map(()=>Math.random()*scale);
-}
-
-export function shape(tensor) {
-  if(Array.isArray(tensor[0])) return [tensor.length, tensor[0].length];
-  return [tensor.length];
-}
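One behavior worth flagging in the deleted shape(): it only inspected the first two nesting levels, so deeper tensors report at most two dimensions. An illustrative example, not package code:

```javascript
// shape() as the deleted tensor.js defined it: at most two dimensions are reported.
const shape = t => (Array.isArray(t[0]) ? [t.length, t[0].length] : [t.length]);

console.log(shape([[1, 2, 3], [4, 5, 6]])); // [2, 3]
console.log(shape([1, 2, 3]));              // [3]
console.log(shape([[[1]], [[2]]]));         // [2, 1] — the third level is ignored
```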