@openfluke/welvet 0.1.2 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +285 -641
- package/dist/index.browser.d.ts +32 -0
- package/dist/index.browser.js +37 -0
- package/dist/index.d.ts +30 -28
- package/dist/index.js +34 -82
- package/dist/loader.browser.d.ts +5 -0
- package/dist/loader.browser.js +25 -0
- package/dist/loader.d.ts +5 -3
- package/dist/loader.js +25 -89
- package/dist/{loom.wasm → main.wasm} +0 -0
- package/dist/types.d.ts +96 -106
- package/dist/types.js +2 -10
- package/dist/wasm_exec.js +568 -658
- package/package.json +4 -2
- package/dist/env.d.ts +0 -3
- package/dist/env.js +0 -3
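The new `index.browser.*` and `loader.browser.*` entries above pair with the `initBrowser()` function documented in the README diff below. A minimal browser-side sketch, assuming the package's export map (not shown in this diff) resolves `@openfluke/welvet` to the browser build under a bundler:

```typescript
import { initBrowser, createNetworkFromJSON } from "@openfluke/welvet";

// Loads the WASM binary (renamed loom.wasm -> main.wasm in this release)
// through the browser loader instead of the Node.js one.
await initBrowser();

const net = createNetworkFromJSON(
  JSON.stringify({
    batch_size: 1,
    grid_rows: 1,
    grid_cols: 1,
    layers_per_cell: 1,
    layers: [{ type: "dense", input_size: 4, output_size: 2, activation: "relu" }],
  })
);

// Result parsing follows the JSON-array convention described in the README.
console.log(JSON.parse(net.ForwardCPU(JSON.stringify([[0.1, 0.2, 0.3, 0.4]])))[0]);
```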
package/README.md
CHANGED
@@ -1,731 +1,375 @@
  # @openfluke/welvet

+ Isomorphic TypeScript/JavaScript wrapper for the LOOM WebAssembly neural network framework.

+ ## Features

+ - 🎉 **NEW: Simple API** - Streamlined functions with cross-platform consistency
+ - 🚀 **Isomorphic WASM Wrapper** - Works in Node.js and browser with the same API
+ - 🔄 **Mirrors main.go** - Direct 1:1 mapping to WASM exports
+ - 🎯 **Type-Safe** - Full TypeScript type definitions for all Network methods
+ - 🤖 **Multi-Agent Networks** - Grid scatter architecture for heterogeneous agents
+ - 📦 **JSON Configuration** - Build networks from simple JSON configs
+ - ⚡ **Fast Training** - Optimized training with configurable parameters
+ - 💾 **Model Persistence** - Save and load trained models as JSON
+ - ✅ **Cross-Platform Consistency** - Same API as Python, C#, C, WASM

- - 🚀 **5.4MB WASM Binary** - Complete neural network framework compiled to WebAssembly
- - 🧠 **All 5 Layer Types** - Dense, Conv2D, Multi-Head Attention, RNN, LSTM fully supported
- - 🎯 **Registry-based Initialization** - Dynamic layer creation via `CallLayerInit()` with zero manual exports
- - 🔍 **Runtime Introspection** - Discover methods, signatures, and parameters dynamically
- - 💾 **Model Serialization** - Save/load models as JSON (no filesystem required)
- - ⚡ **Full Training Support** - Train networks with `network.Train()` API and automatic gradients
- - 📘 **Full TypeScript Support** - Complete type definitions for IntelliSense
- - 🎯 **Zero Dependencies** - Pure WASM + Go runtime, no external libs
- - 🌐 **Isomorphic** - Works in browsers, Node.js, Bun, and Deno
- - 🎨 **Multiple Activation Functions** - ReLU, Sigmoid, Tanh, Softplus, LeakyReLU, Linear
- - ⚠️ **CPU-Only** (GPU support via WebGPU coming soon)
- ## 📦 Installation
+ ## Installation

  ```bash
  npm install @openfluke/welvet
  ```

- ```bash
- # Yarn
- yarn add @openfluke/welvet
- # pnpm
- pnpm add @openfluke/welvet
- # Bun
- bun add @openfluke/welvet
- ```
+ ## Quick Start

+ ### 🎉 NEW: Simple API (Recommended)

- Instead of manually configuring layers, **load a complete model with ONE line**:
+ The simple API provides streamlined functions with consistent behavior across all platforms:

  ```typescript
- import {
- const modelJSON = await fetch("test.json").then((r) => r.json());
- const network = loom.LoadModelFromString(
-   JSON.stringify(modelJSON),
-   "all_layers_test"
- );
+ import {
+   init,
+   createNetworkFromJSON,
+   loadLoomNetwork,
+ } from "@openfluke/welvet";

- const [output, duration] = JSON.parse(
-   network.ForwardCPU(JSON.stringify([input]))
- );
- console.log("Output:", output);
- ```
+ // Initialize LOOM WASM
+ await init();

+ // Create network from JSON config
+ const config = {
+   batch_size: 1,
+   grid_rows: 1,
+   grid_cols: 3,
+   layers_per_cell: 1,
+   layers: [
+     { type: "dense", input_size: 8, output_size: 16, activation: "relu" },
+     {
+       type: "parallel",
+       combine_mode: "grid_scatter",
+       grid_output_rows: 3,
+       grid_output_cols: 1,
+       grid_output_layers: 1,
+       grid_positions: [
+         { branch_index: 0, target_row: 0, target_col: 0, target_layer: 0 },
+         { branch_index: 1, target_row: 1, target_col: 0, target_layer: 0 },
+         { branch_index: 2, target_row: 2, target_col: 0, target_layer: 0 },
+       ],
+       branches: [
+         {
+           type: "parallel",
+           combine_mode: "add",
+           branches: [
+             { type: "dense", input_size: 16, output_size: 8, activation: "relu" },
+             { type: "dense", input_size: 16, output_size: 8, activation: "gelu" },
+           ],
+         },
+         { type: "lstm", input_size: 16, hidden_size: 8, seq_length: 1 },
+         { type: "rnn", input_size: 16, hidden_size: 8, seq_length: 1 },
+       ],
+     },
+     { type: "dense", input_size: 24, output_size: 2, activation: "sigmoid" },
+   ],
+ };

- console.log("Inference time:", duration / 1e6, "ms");
+ const network = createNetworkFromJSON(JSON.stringify(config));

- // Training
+ // Training
  const batches = [
+   { Input: [0.2, 0.2, 0.2, 0.2, 0.8, 0.8, 0.8, 0.8], Target: [1.0, 0.0] },
+   { Input: [0.9, 0.9, 0.9, 0.9, 0.1, 0.1, 0.1, 0.1], Target: [0.0, 1.0] },
+   { Input: [0.7, 0.7, 0.7, 0.7, 0.3, 0.3, 0.3, 0.3], Target: [0.0, 1.0] },
+   { Input: [0.3, 0.3, 0.3, 0.3, 0.7, 0.7, 0.7, 0.7], Target: [1.0, 0.0] },
  ];

+ const trainingConfig = {
+   Epochs: 800,
+   LearningRate: 0.15,
+   UseGPU: false,
+   PrintEveryBatch: 0,
    GradientClip: 1.0,
    LossType: "mse",
+   Verbose: false,
  };

- console.log("Final loss:", result.FinalLoss);
- ```
- ## 📚 API Reference
- ### Initialization
- ```typescript
- interface InitOptions {
-   wasmUrl?: string | URL; // Custom WASM file location
-   injectGoRuntime?: boolean; // Include Go runtime (default: true)
- }
- const loom = await initLoom(options?);
- ```
- ### Creating Networks
- ```typescript
- const network = loom.NewNetwork(
-   inputSize: number, // Input layer size
-   gridRows: number, // Grid rows (use 1 for simple networks)
-   gridCols: number, // Grid columns (use 1 for simple networks)
-   layersPerCell: number // Number of layers
- );
- ```
- ### Layer Types
- All layer types are created via the registry system using `CallLayerInit()`:
- #### Dense (Fully-Connected) Layer
- ```typescript
- const config = loom.CallLayerInit(
-   "InitDenseLayer",
-   JSON.stringify([
-     inputSize: number,
-     outputSize: number,
-     activation: ActivationType,
-   ])
- );
- ```
- #### Conv2D Layer
+ const [result] = network.Train(JSON.stringify([batches, trainingConfig]));
+ console.log("Training complete!");

-   JSON.stringify([
-     height: number, // Input height
-     width: number, // Input width
-     channels: number, // Input channels
-     filters: number, // Number of output filters
-     kernelSize: number, // Kernel size (e.g., 3 for 3x3)
-     stride: number, // Stride (typically 1 or 2)
-     padding: number, // Padding (typically 0 or 1)
-     activation: ActivationType,
-   ])
+ // Forward pass
+ const [output] = network.ForwardCPU(
+   JSON.stringify([[0.2, 0.2, 0.2, 0.2, 0.8, 0.8, 0.8, 0.8]])
  );

-   dModel: number, // Model dimension
-   numHeads: number, // Number of attention heads
-   activation: ActivationType,
-   ])
+ console.log("Output:", JSON.parse(output)); // [0.950, 0.050]
+
+ // Evaluate network
+ const inputs = batches.map((b) => b.Input);
+ const expected = [0, 1, 1, 0];
+ const [metrics] = network.EvaluateNetwork(JSON.stringify([inputs, expected]));
+ const metricsData = JSON.parse(metrics);
+ console.log(
+   `Quality: ${metricsData.score}/100, Deviation: ${metricsData.avg_deviation}%`
  );
- ```

+ // Save/Load
+ const [modelJSON] = network.SaveModelToString(JSON.stringify(["my_model"]));
+ console.log(`Model saved (${modelJSON.length} bytes)`);

-   JSON.stringify([
-     inputSize: number, // Input feature size
-     hiddenSize: number, // Hidden state size
-     seqLength: number, // Sequence length
-     outputSize: number, // Output size (hiddenSize * seqLength)
-   ])
- );
- ```
- #### LSTM Layer
- ```typescript
- const config = loom.CallLayerInit(
-   "InitLSTMLayer",
-   JSON.stringify([
-     inputSize: number, // Input feature size
-     hiddenSize: number, // Hidden/cell state size
-     seqLength: number, // Sequence length
-     outputSize: number, // Output size (hiddenSize * seqLength)
-   ])
+ // Load model
+ const loadedNetwork = loadLoomNetwork(modelJSON, "my_model");
+ const [output2] = loadedNetwork.ForwardCPU(
+   JSON.stringify([[0.2, 0.2, 0.2, 0.2, 0.8, 0.8, 0.8, 0.8]])
  );
+ // output2 === output (bit-for-bit identical!)
+ ```

+ **Simple API Functions:**

+ - `createNetworkFromJSON(jsonConfig)` - Create network from JSON
+ - `loadLoomNetwork(jsonString, modelID)` - Load saved model
+ - `network.ForwardCPU(inputJSON)` - Forward pass
+ - `network.BackwardCPU(gradientsJSON)` - Backward pass
+ - `network.Train(paramsJSON)` - Train network
+ - `network.SaveModelToString(idJSON)` - Save to JSON string
+ - `network.EvaluateNetwork(paramsJSON)` - Evaluate with metrics
+ - `network.UpdateWeights(lrJSON)` - Update weights
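Of the functions listed just above, only `BackwardCPU` and `UpdateWeights` are not exercised by the quick-start example. A minimal sketch of a hand-rolled training step follows; it assumes the JSON-array calling convention described in the API Reference, and the exact shape of each returned array is an assumption rather than a documented guarantee.

```typescript
import { init, createNetworkFromJSON } from "@openfluke/welvet";

await init();

// Single dense layer: 4 inputs -> 2 sigmoid outputs (toy config for illustration).
const net = createNetworkFromJSON(
  JSON.stringify({
    batch_size: 1,
    grid_rows: 1,
    grid_cols: 1,
    layers_per_cell: 1,
    layers: [{ type: "dense", input_size: 4, output_size: 2, activation: "sigmoid" }],
  })
);

const input = [0.1, 0.2, 0.3, 0.4];
const target = [1, 0];

// Forward pass; assumes the first element of the result array is the output vector.
const output: number[] = JSON.parse(net.ForwardCPU(JSON.stringify([input])))[0];

// MSE gradient with respect to the output, pushed back through the network.
const grad = output.map((o, i) => (2 * (o - target[i])) / output.length);
net.BackwardCPU(JSON.stringify([grad]));

// Apply the accumulated gradients with a fixed learning rate.
net.UpdateWeights(JSON.stringify([0.05]));
```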

+ **Cross-Platform Results:**

+ - ✅ Same training: 99.5% improvement, 100/100 quality score
+ - ✅ Same save/load: 0.00 difference in predictions
+ - ✅ Same evaluation: Identical deviation metrics
+ - ✅ Same behavior as Python, C#, C, and WASM

+ See `example/grid-scatter.ts` for a complete working example.
+   {
+     type: "parallel",
+     combine_mode: "add",
+     branches: [
+       { type: "dense", input_size: 16, output_size: 8, activation: "relu" },
+       { type: "dense", input_size: 16, output_size: 8, activation: "gelu" },
+     ],
+   },
+   { type: "lstm", input_size: 16, hidden_size: 8, seq_length: 1 },
+   { type: "rnn", input_size: 16, hidden_size: 8, seq_length: 1 },
+   ],
+ },
+ { type: "dense", input_size: 24, output_size: 2, activation: "sigmoid" },
+ ],
+ });

+ // Train multi-agent network
+ const batches: TrainingBatch[] = [
+   { Input: [0.2, 0.2, 0.2, 0.2, 0.8, 0.8, 0.8, 0.8], Target: [1.0, 0.0] },
+   { Input: [0.9, 0.9, 0.9, 0.9, 0.1, 0.1, 0.1, 0.1], Target: [0.0, 1.0] },
+ ];

+ const config: TrainingConfig = {
+   Epochs: 800,
+   LearningRate: 0.15,
+   LossType: "mse",
+   Verbose: false,
+ };

- const config = loom.InitMultiHeadAttentionLayer(
-   dModel: number, // Model dimension
-   numHeads: number, // Number of attention heads
-   seqLength: number, // Sequence length
-   activation: ActivationType
- );
+ const result = agentNetwork.Train(JSON.stringify([batches, config]));

- network.SetLayer(JSON.stringify([0, 0, layerIndex, JSON.parse(config)]));
  ````

- #### Forward Pass
- ```typescript
- const input = [0.1, 0.2, 0.3, 0.4];
- const resultJSON = network.ForwardCPU(JSON.stringify([input]));
- const [output, duration] = JSON.parse(resultJSON);
- ```
- #### Backward Pass
- ```typescript
- const gradOutput = new Array(outputSize).fill(0.01);
- const backwardJSON = network.BackwardCPU(JSON.stringify([gradOutput]));
- const [gradInput, duration] = JSON.parse(backwardJSON);
- ```
- #### Update Weights
+ ## API Reference

- const learningRate = 0.01;
- network.UpdateWeights(JSON.stringify([learningRate]));
- ```
- ### Model Persistence
+ ### Functions

+ #### `async init(): Promise<void>`

- // Fetch model from server
- const savedModel = await fetch("model.json").then((r) => r.json());
- // Load complete network with ONE function call!
- const network = loom.LoadModelFromString(
-   JSON.stringify(savedModel),
-   "model_name"
- );
+ Initialize the LOOM WASM module for the Node.js environment.

- const savedModel = JSON.parse(localStorage.getItem("my_model")!);
- const network = loom.LoadModelFromString(
-   JSON.stringify(savedModel),
-   "model_name"
- );
- ```
+ #### `async initBrowser(): Promise<void>`

+ Initialize the LOOM WASM module for the browser environment.

+ #### `createNetwork(config: object | string): Network`

- const modelJSON = network.SaveModelToString(JSON.stringify(["model_name"]));
- const model = JSON.parse(JSON.parse(modelJSON)[0]);
+ Create a new neural network from a JSON configuration object or string.

- localStorage.setItem("my_model", JSON.stringify(model));
- ```
+ **Note:** This is the only global function exposed by the WASM module (it mirrors `createLoomNetwork` from main.go). To load a saved model, simply pass the saved JSON string to `createNetwork()`.
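Because `createNetwork()` doubles as the loader, a save/restore round trip is just a matter of feeding the saved string back in. A short sketch follows; the parsing of `SaveModelToString`'s return follows the calling convention described under Network Interface, and the exact result shape is an assumption.

```typescript
import { init, createNetwork } from "@openfluke/welvet";

await init();

// Build a small network from a config object.
const net = createNetwork({
  batch_size: 1,
  grid_rows: 1,
  grid_cols: 1,
  layers_per_cell: 1,
  layers: [{ type: "dense", input_size: 8, output_size: 2, activation: "sigmoid" }],
});

// Save to a JSON string, then restore by passing that same string back in.
const saved = JSON.parse(net.SaveModelToString(JSON.stringify(["demo"])))[0];
const restored = createNetwork(saved);
```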

+ ### Network Interface

- const modelJSON = network.SaveModelToString(JSON.stringify(["model_name"]));
- const model = JSON.parse(JSON.parse(modelJSON)[0]);
+ The `Network` object returned by `createNetwork()` has all methods of the Go `nn.Network` type automatically exposed via reflection.

- localStorage.setItem("my_model", JSON.stringify(model));
- ```
+ **Important:** All Network methods follow the WASM calling convention:

+ - Take a single parameter: a JSON string of an array of parameters
+ - Return a JSON string of an array of results

+ Example:

  ```typescript
- ```python
- # Python
- network = welvet.load_model_from_string(model_json, "model_id")
- ```
+ // Method with no parameters
+ const info = network.GetNetworkInfo(JSON.stringify([]));
+ const parsed = JSON.parse(info)[0];

- ```
+ // Method with parameters
+ const result = network.Train(JSON.stringify([batches, config]));
+ const data = JSON.parse(result)[0];

- ````
- ### Runtime Introspection
- #### Get All Methods
- ```typescript
- const methodsJSON = network.GetMethods();
- const methods = JSON.parse(methodsJSON);
- methods.forEach((method) => {
-   console.log(
-     `${method.method_name}(${method.parameters.map((p) => p.type).join(", ")})`
-   );
- });
+ // Save model (requires modelID parameter)
+ const saved = network.SaveModelToString(JSON.stringify(["my-model"]));
+ const json = JSON.parse(saved)[0];
  ````

+ #### Available Network Methods

+ - `ForwardCPU(paramsJSON)` - CPU forward pass: `[inputs]`
+ - `ForwardGPU(paramsJSON)` - GPU forward pass: `[inputs]`
+ - `BackwardCPU(paramsJSON)` - CPU backward pass: `[gradients]`
+ - `BackwardGPU(paramsJSON)` - GPU backward pass: `[gradients]`
+ - `UpdateWeights(paramsJSON)` - Update weights: `[learningRate]`
+ - `Train(paramsJSON)` - Train network: `[batches, config]`
+ - `SaveModelToString(paramsJSON)` - Save model: `["modelID"]`
+ - `GetWeights(paramsJSON)` - Get layer weights: `[row, col, layer]`
+ - `SetWeights(paramsJSON)` - Set layer weights: `[row, col, layer, weights]`
+ - `GetBiases(paramsJSON)` - Get layer biases: `[row, col, layer]`
+ - `SetBiases(paramsJSON)` - Set layer biases: `[row, col, layer, biases]`
+ - `GetActivation(paramsJSON)` - Get activation: `[row, col, layer]`
+ - `GetLayerType(paramsJSON)` - Get layer type: `[row, col, layer]`
+ - `GetLayerSizes(paramsJSON)` - Get layer sizes: `[row, col, layer]`
+ - `GetBatchSize(paramsJSON)` - Get batch size: `[]`
+ - `GetGridDimensions(paramsJSON)` - Get grid dimensions: `[]`
+ - `GetNetworkInfo(paramsJSON)` - Get network info: `[]`
+ - `GetTotalParameters(paramsJSON)` - Get parameter count: `[]`
+ - `InitializeWeights(paramsJSON)` - Initialize weights: `[]` or `[method]`
+ - `Clone(paramsJSON)` - Clone network: `[]`
+ - And 10+ more methods...
+ - `GetLastOutput(): string` - Get last forward pass output
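As a sketch of the `[row, col, layer]` addressing used by the accessors above, the snippet below inspects and perturbs the first layer of the first grid cell. Whether `GetWeights` returns a flat numeric array is an assumption, so treat the `map` call accordingly.

```typescript
import { init, createNetworkFromJSON } from "@openfluke/welvet";

await init();

const net = createNetworkFromJSON(
  JSON.stringify({
    batch_size: 1,
    grid_rows: 1,
    grid_cols: 1,
    layers_per_cell: 1,
    layers: [{ type: "dense", input_size: 4, output_size: 4, activation: "relu" }],
  })
);

// Address the layer at grid position (row 0, col 0, layer 0).
const where = [0, 0, 0];
const layerType = JSON.parse(net.GetLayerType(JSON.stringify(where)))[0];
const totalParams = JSON.parse(net.GetTotalParameters(JSON.stringify([])))[0];
console.log(`layer 0 is ${layerType}; the network has ${totalParams} parameters`);

// Read the layer's weights, nudge them, and write them back.
// (Assumes the result is a flat array of numbers.)
const weights = JSON.parse(net.GetWeights(JSON.stringify(where)))[0];
const nudged = weights.map((w: number) => w + (Math.random() - 0.5) * 0.01);
net.SetWeights(JSON.stringify([...where, nudged]));
```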

+ ### Types

+ #### `NetworkConfig`

  ```typescript
+ interface NetworkConfig {
+   batch_size: number;
+   grid_rows?: number; // Required for grid networks (use 1 for sequential)
+   grid_cols?: number; // Required for grid networks (use 1 for sequential)
+   layers_per_cell?: number; // Required for grid networks
+   layers: LayerConfig[];
  }
  ```

+ #### `LayerConfig`

  ```typescript
+ interface LayerConfig {
+   type: string;
+   input_size?: number;
+   output_size?: number;
+   hidden_size?: number;
+   seq_length?: number;
+   activation?: string;
+   combine_mode?: string;
+   grid_output_rows?: number;
+   grid_output_cols?: number;
+   grid_output_layers?: number;
+   grid_positions?: GridPosition[];
+   branches?: LayerConfig[];
  }
  ```

- ### MNIST-Style Classifier
+ #### `TrainingBatch`

  ```typescript
- const loom = await initLoom();
- // Network: 784 → 128 → 64 → 10
- const network = loom.NewNetwork(784, 1, 1, 3);
- const layer0 = loom.InitDenseLayer(784, 128, ActivationType.ReLU);
- const layer1 = loom.InitDenseLayer(128, 64, ActivationType.ReLU);
- const layer2 = loom.InitDenseLayer(64, 10, ActivationType.Sigmoid);
- network.SetLayer(JSON.stringify([0, 0, 0, JSON.parse(layer0)]));
- network.SetLayer(JSON.stringify([0, 0, 1, JSON.parse(layer1)]));
- network.SetLayer(JSON.stringify([0, 0, 2, JSON.parse(layer2)]));
- // Training loop
- const epochs = 50;
- const learningRate = 0.01;
- for (let epoch = 0; epoch < epochs; epoch++) {
-   // Your training data here
-   const input = new Array(784).fill(0).map(() => Math.random());
-   const target = new Array(10).fill(0);
-   target[Math.floor(Math.random() * 10)] = 1;
-   // Forward
-   const [output] = JSON.parse(network.ForwardCPU(JSON.stringify([input])));
-   // Compute loss (MSE)
-   const loss =
-     output.reduce((sum, val, i) => sum + Math.pow(val - target[i], 2), 0) /
-     output.length;
-   // Backward
-   const gradOutput = output.map(
-     (val, i) => (2 * (val - target[i])) / output.length
-   );
-   network.BackwardCPU(JSON.stringify([gradOutput]));
-   // Update
-   network.UpdateWeights(JSON.stringify([learningRate]));
-   if (epoch % 10 === 0) {
-     console.log(`Epoch ${epoch}: Loss = ${loss.toFixed(6)}`);
-   }
- }
- // Save model
- const modelJSON = network.SaveModelToString(JSON.stringify(["mnist"]));
- localStorage.setItem("mnist_model", JSON.parse(modelJSON)[0]);
+ interface TrainingBatch {
+   Input: number[];
+   Target: number[];
  }
  ```

+ #### `TrainingConfig`

  ```typescript
- network.SetLayer(JSON.stringify([0, 0, 0, JSON.parse(layer0)]));
- network.SetLayer(JSON.stringify([0, 0, 1, JSON.parse(layer1)]));
- const trainingData = [
-   { input: [0, 0], target: [0] },
-   { input: [0, 1], target: [1] },
-   { input: [1, 0], target: [1] },
-   { input: [1, 1], target: [0] },
- ];
- for (let epoch = 0; epoch < 1000; epoch++) {
-   let totalLoss = 0;
-   for (const sample of trainingData) {
-     const [output] = JSON.parse(
-       network.ForwardCPU(JSON.stringify([sample.input]))
-     );
-     const loss = Math.pow(output[0] - sample.target[0], 2);
-     totalLoss += loss;
-     const gradOutput = [2 * (output[0] - sample.target[0])];
-     network.BackwardCPU(JSON.stringify([gradOutput]));
-     network.UpdateWeights(JSON.stringify([0.1]));
-   }
-   if (epoch % 100 === 0) {
-     console.log(`Epoch ${epoch}: Loss = ${(totalLoss / 4).toFixed(6)}`);
-   }
- }
- // Test
- trainingData.forEach((sample) => {
-   const [output] = JSON.parse(
-     network.ForwardCPU(JSON.stringify([sample.input]))
-   );
-   console.log(
-     `${sample.input} → ${output[0].toFixed(4)} (expected ${sample.target[0]})`
-   );
- });
- ```
- ## 🌐 Browser Usage
- ### Via CDN (UMD)
- ```html
- <!DOCTYPE html>
- <html>
-   <head>
-     <script src="https://unpkg.com/@openfluke/welvet"></script>
-   </head>
-   <body>
-     <script>
-       (async () => {
-         const { initLoom, ActivationType } = window.Welvet;
-         const loom = await initLoom();
-         const network = loom.NewNetwork(4, 1, 1, 1);
-         console.log("LOOM ready!");
-       })();
-     </script>
-   </body>
- </html>
- ```
- ### Via ES Modules
- ```html
- <!DOCTYPE html>
- <html>
-   <head>
-     <script type="module">
-       import {
-         initLoom,
-         ActivationType,
-       } from "https://unpkg.com/@openfluke/welvet/dist/esm/index.js";
-       const loom = await initLoom();
-       const network = loom.NewNetwork(4, 1, 1, 1);
-       console.log("LOOM ready!");
-     </script>
-   </head>
- </html>
- ```
- ## ⚛️ Framework Integration
- ### React
- ```tsx
- import { useEffect, useState } from "react";
- import { initLoom, type LoomAPI } from "@openfluke/welvet";
- function NeuralNetworkComponent() {
-   const [loom, setLoom] = useState<LoomAPI | null>(null);
-   const [prediction, setPrediction] = useState<number[] | null>(null);
-   useEffect(() => {
-     initLoom().then((api) => {
-       setLoom(api);
-       // Initialize network
-       const network = api.NewNetwork(4, 1, 1, 2);
-       const layer0 = api.InitDenseLayer(4, 8, 0); // ReLU
-       const layer1 = api.InitDenseLayer(8, 2, 1); // Sigmoid
-       network.SetLayer(JSON.stringify([0, 0, 0, JSON.parse(layer0)]));
-       network.SetLayer(JSON.stringify([0, 0, 1, JSON.parse(layer1)]));
-       // Make prediction
-       const input = [0.5, 0.3, 0.2, 0.1];
-       const [output] = JSON.parse(network.ForwardCPU(JSON.stringify([input])));
-       setPrediction(output);
-     });
-   }, []);
-   if (!loom) return <div>Loading neural network...</div>;
-   return (
-     <div>
-       <h2>Prediction: {prediction?.map((v) => v.toFixed(4)).join(", ")}</h2>
-     </div>
-   );
+ interface TrainingConfig {
+   Epochs: number;
+   LearningRate: number;
+   LossType?: string;
+   Verbose?: boolean;
+   UseGPU?: boolean;
+   PrintEveryBatch?: number;
+   GradientClip?: number;
  }
  ```
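The four interfaces above compose as shown in the sketch below, which trains a tiny two-layer network end to end; it assumes the interfaces are exported as types from the package entry point, which this diff does not show explicitly.

```typescript
import {
  init,
  createNetworkFromJSON,
  type NetworkConfig,
  type TrainingBatch,
  type TrainingConfig,
} from "@openfluke/welvet";

await init();

const config: NetworkConfig = {
  batch_size: 1,
  grid_rows: 1,
  grid_cols: 2,
  layers_per_cell: 1,
  layers: [
    { type: "dense", input_size: 2, output_size: 8, activation: "relu" },
    { type: "dense", input_size: 8, output_size: 1, activation: "sigmoid" },
  ],
};

const batches: TrainingBatch[] = [
  { Input: [0.1, 0.9], Target: [1.0] },
  { Input: [0.9, 0.1], Target: [0.0] },
];

const training: TrainingConfig = {
  Epochs: 200,
  LearningRate: 0.1,
  LossType: "mse",
  Verbose: false,
};

const net = createNetworkFromJSON(JSON.stringify(config));

// Result parsing follows the JSON-array calling convention described above.
const summary = JSON.parse(net.Train(JSON.stringify([batches, training])))[0];
console.log(summary);
```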

- ```vue
- <script setup lang="ts">
- import { ref, onMounted } from "vue";
- import { initLoom, type LoomAPI } from "@openfluke/welvet";
- const loom = ref<LoomAPI | null>(null);
- const output = ref<number[] | null>(null);
+ ## Example

- const api = await initLoom();
- loom.value = api;
+ See `example/grid-scatter.ts` for a complete multi-agent training demo:

- const [result] = JSON.parse(network.ForwardCPU(JSON.stringify([[0.5, 0.5]])));
- output.value = result;
- });
- </script>
- <template>
-   <div v-if="!loom">Loading...</div>
-   <div v-else>
-     <h2>Neural Network Output</h2>
-     <pre>{{ output }}</pre>
-   </div>
- </template>
- ```
- ### Svelte
- ```svelte
- <script lang="ts">
-   import { onMount } from 'svelte';
-   import { initLoom, type LoomAPI } from '@openfluke/welvet';
-   let loom: LoomAPI | null = null;
-   let result: number[] = [];
-   onMount(async () => {
-     loom = await initLoom();
-     const network = loom.NewNetwork(3, 1, 1, 1);
-     const layer = loom.InitDenseLayer(3, 2, 0); // ReLU
-     network.SetLayer(JSON.stringify([0, 0, 0, JSON.parse(layer)]));
-     const [output] = JSON.parse(network.ForwardCPU(JSON.stringify([[1, 2, 3]])));
-     result = output;
-   });
- </script>
- {#if !loom}
-   <p>Loading neural network...</p>
- {:else}
-   <h2>Result: {result.join(', ')}</h2>
- {/if}
- ```
- ## 🔧 Advanced Configuration
- ### Custom WASM Location
- ```typescript
- const loom = await initLoom({
-   wasmUrl: "/custom/path/loom.wasm",
- });
- ```
- ### Skip Go Runtime Injection
- ```typescript
- // Useful if you're loading Go runtime separately
- const loom = await initLoom({
-   injectGoRuntime: false,
- });
- ```
- ## 📊 Performance Tips
- 1. **Batch Processing** - Process multiple inputs together when possible
- 2. **Model Caching** - Save trained models to avoid retraining
- 3. **Layer Sizing** - Start with smaller layers and scale up as needed
- 4. **Learning Rate** - Tune learning rate for faster convergence (typically 0.001 - 0.1)
- 5. **Activation Functions** - ReLU often trains faster than Sigmoid/Tanh
- ## 🐛 Troubleshooting
- ### WASM fails to load
- Ensure your server serves `.wasm` files with the correct MIME type:
- ```
- Content-Type: application/wasm
+ ```bash
+ cd example
+ bun install
+ bun run grid-scatter.ts
  ```

+ Expected output:

- Make sure to await the initialization:
- ```typescript
- const loom = await initLoom(); // Don't forget await!
  ```
- // ❌ Wrong
- network.ForwardCPU(input);
+ 🤖 Running Grid Scatter Multi-Agent Training...
+ ✅ Agent network created!
+ Training for 800 epochs with learning rate 0.150
+ ✅ Training complete!
+ Training time: 0.47 seconds
+ Initial Loss: 0.252249
+ Final Loss: 0.001374
+ Improvement: 99.46%
+ Total Epochs: 800
  ```

- - **Python Package**: [`welvet`](https://pypi.org/project/welvet/) - Python bindings for LOOM
- - **Go Framework**: [LOOM](https://github.com/openfluke/loom) - Original Go implementation
- - **Legacy Package**: [`@openfluke/portal`](https://github.com/openfluke/portal) - Previous generation framework
- ## 📄 License
+ ## Layer Types

+ - `dense` - Fully connected layer
+ - `lstm` - Long Short-Term Memory layer
+ - `rnn` - Recurrent Neural Network layer
+ - `gru` - Gated Recurrent Unit layer
+ - `cnn` - Convolutional layer
+ - `parallel` - Parallel branches with combine modes:
+   - `add` - Element-wise addition
+   - `concat` - Concatenation
+   - `multiply` - Element-wise multiplication
+   - `grid_scatter` - Multi-agent grid routing
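For the non-default combine modes listed above, a sketch of a `concat` parallel block follows; it assumes branch outputs are concatenated in branch order, so the next layer's `input_size` is the sum of the branch `output_size`s.

```typescript
import { init, createNetworkFromJSON } from "@openfluke/welvet";

await init();

const concatConfig = {
  batch_size: 1,
  grid_rows: 1,
  grid_cols: 2,
  layers_per_cell: 1,
  layers: [
    {
      type: "parallel",
      combine_mode: "concat",
      branches: [
        { type: "dense", input_size: 8, output_size: 4, activation: "relu" },
        { type: "dense", input_size: 8, output_size: 4, activation: "tanh" },
      ],
    },
    // 4 + 4 concatenated branch outputs feed the head layer.
    { type: "dense", input_size: 8, output_size: 2, activation: "softmax" },
  ],
};

const net = createNetworkFromJSON(JSON.stringify(concatConfig));
```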

+ ## Activation Functions

+ `relu`, `sigmoid`, `tanh`, `softmax`, `gelu`, `swish`, `mish`, `leaky_relu`, `elu`, `selu`
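These activation names are plain strings in `LayerConfig.activation`. A quick sketch that builds one single-layer network per activation and runs the same input through each (output parsing again assumes the JSON-array convention):

```typescript
import { init, createNetworkFromJSON } from "@openfluke/welvet";

await init();

const activations = [
  "relu", "sigmoid", "tanh", "softmax", "gelu",
  "swish", "mish", "leaky_relu", "elu", "selu",
];

for (const activation of activations) {
  const net = createNetworkFromJSON(
    JSON.stringify({
      batch_size: 1,
      grid_rows: 1,
      grid_cols: 1,
      layers_per_cell: 1,
      layers: [{ type: "dense", input_size: 3, output_size: 3, activation }],
    })
  );
  const out = JSON.parse(net.ForwardCPU(JSON.stringify([[0.5, -0.5, 2.0]])))[0];
  console.log(activation, out);
}
```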

+ ## License

- - 💬 [Discussions](https://github.com/openfluke/loom/discussions)
- - 📖 [Documentation](https://github.com/openfluke/loom/tree/main/typescript)
+ MIT

+ ## Links

+ - [GitHub](https://github.com/openfluke/loom)
+ - [WASM Documentation](../wasm/README.md)
+ - [Go Examples](../examples/)