@openfluke/welvet 0.1.5 → 0.1.7

package/README.md CHANGED
@@ -151,53 +151,106 @@ const [output2] = loadedNetwork.ForwardCPU(
  - ✅ Same behavior as Python, C#, C, and WASM

  See `example/grid-scatter.ts` for a complete working example.
- {
- type: "parallel",
- combine_mode: "add",
- branches: [
- {
- type: "dense",
- input_size: 16,
- output_size: 8,
- activation: "relu",
- },
- {
- type: "dense",
- input_size: 16,
- output_size: 8,
- activation: "gelu",
- },
- ],
- },
- { type: "lstm", input_size: 16, hidden_size: 8, seq_length: 1 },
- { type: "rnn", input_size: 16, hidden_size: 8, seq_length: 1 },
- ],
- },
- {
- type: "dense",
- input_size: 24,
- output_size: 2,
- activation: "sigmoid",
- },
- ],
- });
-
- // Train multi-agent network
- const batches: TrainingBatch[] = [
- { Input: [0.2, 0.2, 0.2, 0.2, 0.8, 0.8, 0.8, 0.8], Target: [1.0, 0.0] },
- { Input: [0.9, 0.9, 0.9, 0.9, 0.1, 0.1, 0.1, 0.1], Target: [0.0, 1.0] },
- ];

- const config: TrainingConfig = {
- Epochs: 800,
- LearningRate: 0.15,
- LossType: "mse",
- Verbose: false,
- };
+ ### Stepping API - Fine-Grained Execution Control

- const result = agentNetwork.Train(JSON.stringify([batches, config]));
+ **NEW:** Execute networks one step at a time for online learning:

- ````
+ ```typescript
+ import { init, createNetwork, StepState } from "@openfluke/welvet";
+
+ await init();
+
+ // Create network
+ const config = { batch_size: 1, layers: [
+ { type: "dense", input_height: 4, output_height: 8, activation: "relu" },
+ { type: "lstm", input_size: 8, hidden_size: 12, seq_length: 1 },
+ { type: "dense", input_height: 12, output_height: 3, activation: "softmax" }
+ ]};
+ const network = createNetwork(config);
+
+ // Initialize stepping state
+ const state: StepState = network.createStepState(4);
+
+ // Training loop - update weights after EACH step
+ for (let step = 0; step < 100000; step++) {
+ state.setInput(new Float32Array([0.1, 0.2, 0.1, 0.3]));
+ state.stepForward();
+ const output = state.getOutput();
+
+ // Calculate gradients
+ const gradients = new Float32Array(output.length);
+ for (let i = 0; i < output.length; i++)
+ gradients[i] = output[i] - target[i];
+
+ // Backward pass
+ state.stepBackward(gradients);
+
+ // Update weights immediately
+ network.ApplyGradients(JSON.stringify([learningRate]));
+ }
+ ```
+
+ **Stepping API:**
+ - `network.createStepState(inputSize)` - Initialize stepping state
+ - `state.setInput(data)` - Set input for current step
+ - `state.stepForward()` - Execute forward pass
+ - `state.getOutput()` - Get output from last layer
+ - `state.stepBackward(gradients)` - Execute backward pass
+ - `network.ApplyGradients(paramsJSON)` - Update network weights
+
+ See `example/step_train_v3.ts` for a complete example achieving 100% accuracy.
+
+ ### 🧠 Neural Tweening API - Gradient-Free Learning
+
+ **NEW:** Direct weight adjustment without backpropagation:
+
+ ```typescript
+ import { init, createNetwork, TweenState } from "@openfluke/welvet";
+
+ await init();
+
+ const network = createNetwork(config);
+
+ // Create tween state (with optional chain rule)
+ const tweenState: TweenState = network.createTweenState(true); // useChainRule=true
+
+ // Training loop - direct weight updates
+ for (let step = 0; step < 10000; step++) {
+ const input = new Float32Array([0.1, 0.2, 0.3, 0.4]);
+ const targetClass = 1; // Target output class
+
+ // Single-step tween learning
+ const loss = tweenState.TweenStep(input, targetClass, 4, 0.02);
+ }
+ ```
+
+ **Tweening API:**
+ - `network.createTweenState(useChainRule)` - Initialize tween state
+ - `tweenState.TweenStep(input, targetClass, outputSize, lr)` - Train step
+ - `tweenState.setChainRule(enabled)` - Toggle chain rule
+ - `tweenState.getChainRule()` - Get chain rule status
+ - `tweenState.getTweenSteps()` - Get total steps performed
+
+ ### 📊 Adaptation Benchmark - Multi-Architecture Testing
+
+ **NEW:** Run the full Test 18 Multi-Architecture Adaptation Benchmark:
+
+ ```bash
+ cd example
+ bun run test18_adaptation.ts
+ ```
+
+ Tests 5 architectures × 3 depths × 5 training modes (75 tests total):
+ - **Architectures:** Dense, Conv2D, RNN, LSTM, Attention
+ - **Depths:** 3, 5, 9 layers
+ - **Modes:** NormalBP, StepBP, Tween, TweenChain, StepTweenChain
+
+ Measures adaptation speed when tasks change mid-stream (chase→avoid→chase).
+
+ See `example/test18_adaptation.ts` for the full implementation.
+
+ ```

  ## API Reference

@@ -323,9 +376,9 @@ interface TrainingConfig {
  }
  ```

- ## Example
+ ## Examples

- See `example/grid-scatter.ts` for a complete multi-agent training demo:
+ ### Grid Scatter Multi-Agent

  ```bash
  cd example
@@ -333,20 +386,20 @@ bun install
  bun run grid-scatter.ts
  ```

- Expected output:
+ ### Stepping Training (LSTM)

+ ```bash
+ bun run step_train_v3.ts
  ```
- 🤖 Running Grid Scatter Multi-Agent Training...
- Agent network created!
- Training for 800 epochs with learning rate 0.150
- ✅ Training complete!
- Training time: 0.47 seconds
- Initial Loss: 0.252249
- Final Loss: 0.001374
- Improvement: 99.46%
- Total Epochs: 800
+
+ ### Adaptation Benchmark (75 tests)
+
+ ```bash
+ bun run test18_adaptation.ts
  ```

+ > **Note:** Full benchmark takes ~12.5 minutes (10 seconds per test)
+
  ## Layer Types

  - `dense` - Fully connected layer
@@ -366,7 +419,7 @@ Total Epochs: 800

  ## License

- MIT
+ APACHE2

  ## Links

@@ -24,7 +24,9 @@ export async function init() {
  * Wrapper around the global createLoomNetwork function exposed by WASM
  */
  export function createNetwork(config) {
- const jsonConfig = typeof config === 'string' ? config : JSON.stringify(config);
+ const jsonConfig = typeof config === "string"
+ ? config
+ : JSON.stringify(config);
  return createLoomNetwork(jsonConfig);
  }
  /**
package/dist/index.js CHANGED
@@ -25,7 +25,9 @@ export async function initBrowser() {
  * Wrapper around the global createLoomNetwork function exposed by WASM
  */
  export function createNetwork(config) {
- const jsonConfig = typeof config === 'string' ? config : JSON.stringify(config);
+ const jsonConfig = typeof config === "string"
+ ? config
+ : JSON.stringify(config);
  return createLoomNetwork(jsonConfig);
  }
  /**
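
The change above only splits the existing string-vs-object check across lines, so `createNetwork` keeps accepting either form. A minimal sketch of both call styles, assuming the `init`/`createNetwork` exports and the `input_height`/`output_height` layer fields used in the README examples:

```typescript
import { init, createNetwork } from "@openfluke/welvet";

// Load the WASM runtime before constructing any network.
await init();

// Passing a plain object: createNetwork serializes it with JSON.stringify.
const fromObject = createNetwork({
  batch_size: 1,
  layers: [{ type: "dense", input_height: 4, output_height: 2, activation: "relu" }],
});

// Passing a pre-serialized JSON string: it is forwarded to createLoomNetwork unchanged.
const fromString = createNetwork(
  JSON.stringify({
    batch_size: 1,
    layers: [{ type: "dense", input_height: 4, output_height: 2, activation: "relu" }],
  })
);
```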
package/dist/loader.js CHANGED
@@ -8,19 +8,31 @@ import { dirname, join } from "path";
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = dirname(__filename);
  export async function loadLoomWASM() {
+ // __dirname points to:
+ // - dist/ → in production
+ // - src/ → when running via Bun, ts-node, or example files
+ let root;
+ if (__dirname.endsWith("dist")) {
+ // Normal production layout
+ root = __dirname;
+ }
+ else {
+ // Running from src/ or example/
+ // Point to project’s dist/ directory
+ root = join(__dirname, "..", "dist");
+ }
  // Load wasm_exec.js
- const wasmExecPath = join(__dirname, "../assets/wasm_exec.js");
+ const wasmExecPath = join(root, "wasm_exec.js");
  const wasmExecCode = readFileSync(wasmExecPath, "utf-8");
  // Execute wasm_exec.js to get the Go runtime
  eval(wasmExecCode);
  // Load main.wasm
- const wasmPath = join(__dirname, "../assets/main.wasm");
+ const wasmPath = join(root, "main.wasm");
  const wasmBuffer = readFileSync(wasmPath);
- // @ts-ignore - Go is defined by wasm_exec.js
+ // @ts-ignore - Go runtime from wasm_exec.js
  const go = new Go();
  const { instance } = await WebAssembly.instantiate(wasmBuffer, go.importObject);
- // Run the Go WASM module
  go.run(instance);
- // Wait a bit for initialization
+ // Wait for WASM runtime to finish bootstrapping
  await new Promise((resolve) => setTimeout(resolve, 100));
  }
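
The rewritten loader resolves `wasm_exec.js` and `main.wasm` relative to `dist/` instead of a sibling `assets/` directory. A rough usage sketch, assuming the loader stays reachable at `dist/loader.js` (the subpath import below is illustrative) and that `createLoomNetwork` becomes a global once `go.run(instance)` completes, as declared in `types.d.ts`; most callers would use the package's `init()` wrapper instead of calling the loader directly:

```typescript
// Illustrative subpath import; the published exports map may differ.
import { loadLoomWASM } from "@openfluke/welvet/dist/loader.js";

// Boots the Go/WASM runtime; wasm_exec.js and main.wasm are read from dist/.
await loadLoomWASM();

// createLoomNetwork is the WASM-exposed global declared in types.d.ts.
const network = createLoomNetwork(
  JSON.stringify({
    batch_size: 1,
    layers: [{ type: "dense", input_height: 4, output_height: 2, activation: "relu" }],
  })
);
```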
package/dist/types.d.ts CHANGED
@@ -95,6 +95,41 @@ export interface Network {
  ListMethods(paramsJSON: string): string;
  HasMethod(paramsJSON: string): string;
  GetMethodSignature(paramsJSON: string): string;
+ ApplyGradients(paramsJSON: string): string;
+ ApplyGradientsAdamW(paramsJSON: string): string;
+ ApplyGradientsRMSprop(paramsJSON: string): string;
+ ApplyGradientsSGDMomentum(paramsJSON: string): string;
+ createStepState(inputSize: number): StepState;
+ createTweenState(useChainRule?: boolean): TweenState;
+ }
+ /**
+ * StepState interface for stepping execution
+ */
+ export interface StepState {
+ setInput(data: Float32Array | number[]): void;
+ stepForward(): number;
+ getOutput(): Float32Array;
+ stepBackward(gradients: Float32Array | number[]): Float32Array;
+ }
+ /**
+ * TweenState interface for neural tweening execution
+ */
+ export interface TweenState {
+ /**
+ * Perform a tween training step
+ * @param input - Input data
+ * @param targetClass - Target class index
+ * @param outputSize - Size of output layer
+ * @param learningRate - Learning rate for this step
+ * @returns Loss value
+ */
+ TweenStep(input: Float32Array | number[], targetClass: number, outputSize: number, learningRate: number): number;
+ /** Enable/disable chain rule mode */
+ setChainRule(enabled: boolean): void;
+ /** Get current chain rule setting */
+ getChainRule(): boolean;
+ /** Get number of tween steps performed */
+ getTweenSteps(): number;
  }
  /**
  * Global WASM functions exposed by main.go
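
The `TweenState` declarations above include chain-rule accessors and a step counter that the README's tweening example never calls. A small sketch of how they might be driven, assuming the same `init`/`createNetwork` setup and layer fields as the README examples; the mid-run schedule is purely illustrative:

```typescript
import { init, createNetwork, TweenState } from "@openfluke/welvet";

await init();

const network = createNetwork({
  batch_size: 1,
  layers: [
    { type: "dense", input_height: 4, output_height: 8, activation: "relu" },
    { type: "dense", input_height: 8, output_height: 3, activation: "softmax" },
  ],
});

// Start without the chain rule, then enable it later via the new accessor.
const tween: TweenState = network.createTweenState(false);

for (let step = 0; step < 5000; step++) {
  const input = new Float32Array([0.1, 0.2, 0.3, 0.4]);
  const loss = tween.TweenStep(input, /* targetClass */ 1, /* outputSize */ 3, 0.02);

  if (step === 2500 && !tween.getChainRule()) {
    tween.setChainRule(true); // switch to chain-rule mode halfway through
  }
  if (step % 1000 === 0) console.log(`step ${step}, loss ${loss.toFixed(4)}`);
}

console.log("tween steps performed:", tween.getTweenSteps());
```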
@@ -102,4 +137,16 @@ export interface Network {
  */
  declare global {
  function createLoomNetwork(jsonConfig: string): Network;
+ function createAdaptationTracker(windowMs: number, totalMs: number): AdaptationTracker;
+ }
+ /**
+ * AdaptationTracker interface for tracking accuracy during task changes
+ */
+ export interface AdaptationTracker {
+ setModelInfo(modelName: string, modeName: string): void;
+ scheduleTaskChange(atOffsetMs: number, taskID: number, taskName: string): void;
+ start(initialTask: string, initialTaskID: number): void;
+ recordOutput(isCorrect: boolean): void;
+ getCurrentTask(): number;
+ finalize(): string;
  }
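
`createAdaptationTracker` and the `AdaptationTracker` interface back the Test 18 benchmark mentioned in the README. A sketch inferred only from these declarations, assuming the global is registered after `init()` like `createLoomNetwork`, that the timing parameters are in milliseconds as their names suggest, and that `finalize()` returns a serialized report; the real driving loop lives in `example/test18_adaptation.ts`:

```typescript
import { init } from "@openfluke/welvet";

await init(); // presumably also registers the createAdaptationTracker global

// 1-second accuracy windows over a 10-second run (illustrative values).
const tracker = createAdaptationTracker(1000, 10_000);
tracker.setModelInfo("dense-3", "StepTweenChain");

// Flip the task mid-stream, as in the chase→avoid→chase benchmark.
tracker.scheduleTaskChange(3000, 1, "avoid");
tracker.scheduleTaskChange(6000, 0, "chase");

tracker.start("chase", 0);

// Inside a training loop: report whether each prediction matched the
// currently active task's target (placeholder correctness check here).
const activeTask = tracker.getCurrentTask();
tracker.recordOutput(activeTask === 0);

// finalize() is declared to return a string, presumably a serialized report.
console.log(tracker.finalize());
```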
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@openfluke/welvet",
- "version": "0.1.5",
+ "version": "0.1.7",
  "description": "TypeScript/JavaScript bindings for LOOM neural network framework with WebAssembly support - GPU-accelerated machine learning in the browser",
  "type": "module",
  "main": "./dist/index.js",
@@ -58,4 +58,4 @@
  "@types/node": "^22.7.5",
  "typescript": "^5.6.3"
  }
- }
+ }
package/dist/main.wasm DELETED
Binary file