@openfluke/welvet 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -42,6 +42,34 @@ bun add @openfluke/welvet
 
  ## 🚀 Quick Start
 
+ ### The Easy Way: Load Complete Models
+
+ Instead of manually configuring layers, **load a complete model with ONE line**:
+
+ ```typescript
+ import { initLoom } from "@openfluke/welvet";
+
+ const loom = await initLoom();
+
+ // Load model from JSON (architecture + weights all at once!)
+ const modelJSON = await fetch("test.json").then((r) => r.json());
+ const network = loom.LoadModelFromString(
+   JSON.stringify(modelJSON),
+   "all_layers_test"
+ );
+
+ // That's it! All 16 layers, weights, and biases are loaded automatically
+ const input = new Array(10).fill(0).map(() => Math.random());
+ const [output, duration] = JSON.parse(
+   network.ForwardCPU(JSON.stringify([input]))
+ );
+ console.log("Output:", output);
+ ```
+
+ **Live Demo:** See `wasm/all_layers_test.html` for a complete working example that loads a 26.4KB model with 16 layers (Dense, Conv2D, Attention, RNN, LSTM) and runs inference in the browser!
+
+ ### Manual Configuration (for building models from scratch)
+
  ```typescript
  import { initLoom, ActivationType } from "@openfluke/welvet";
 
@@ -254,6 +282,28 @@ network.UpdateWeights(JSON.stringify([learningRate]));
 
  ### Model Persistence
 
+ #### Load Model (The Easy Way - ONE LINE!)
+
+ ```typescript
+ // Fetch model from server
+ const savedModel = await fetch("model.json").then((r) => r.json());
+
+ // Load the complete network with ONE function call!
+ const network = loom.LoadModelFromString(
+   JSON.stringify(savedModel),
+   "model_name"
+ );
+
+ // Or restore from localStorage (separate variables to avoid redeclaring the ones above)
+ const storedModel = JSON.parse(localStorage.getItem("my_model")!);
+ const restoredNetwork = loom.LoadModelFromString(
+   JSON.stringify(storedModel),
+   "model_name"
+ );
+ ```
+
+ **That's it!** All layers, weights, biases, and configurations are automatically restored. No manual layer setup needed!
+
  #### Save Model
 
  ```typescript
@@ -264,16 +314,41 @@ const model = JSON.parse(JSON.parse(modelJSON)[0]);
  localStorage.setItem("my_model", JSON.stringify(model));
  ```
 
- #### Load Model
+ #### Save Model
 
  ```typescript
- const savedModel = JSON.parse(localStorage.getItem("my_model")!);
- const network = loom.LoadModelFromString(
-   JSON.stringify(savedModel),
-   "model_name"
- );
+ const modelJSON = network.SaveModelToString(JSON.stringify(["model_name"]));
+ const model = JSON.parse(JSON.parse(modelJSON)[0]);
+
+ // Store anywhere (localStorage, IndexedDB, backend API, etc.)
+ localStorage.setItem("my_model", JSON.stringify(model));
+ ```
+
+ #### Cross-Platform Model Loading
+
+ The same JSON model file works across **all three platforms**:
+
+ ```typescript
+ // JavaScript/WASM
+ const network = loom.LoadModelFromString(modelJSON, "model_id");
+ ```
+
+ ```python
+ # Python
+ network = welvet.load_model_from_string(model_json, "model_id")
+ ```
+
+ ```go
+ // Go
+ network, _ := nn.LoadModelFromString(modelJSON, "model_id")
  ```
 
+ See `examples/all_layers_validation.go` for a complete demo that generates test.json (26.4KB with 16 layers) and verifies all three platforms load it identically!
+
+ #### Load Model (Legacy API)
+
+ ````
+
  ### Runtime Introspection
 
  #### Get All Methods
@@ -287,7 +362,7 @@ methods.forEach((method) => {
      `${method.method_name}(${method.parameters.map((p) => p.type).join(", ")})`
    );
  });
- ```
+ ````
 
  #### Check Method Availability
 
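The README shows the save and load calls separately; the sketch below combines only the calls that appear in this diff (`SaveModelToString`, `LoadModelFromString`) into one round trip. The source file name `test.json`, the model ID `"my_model"`, and the `restored` variable are illustrative, and a browser environment with `localStorage` is assumed.

```typescript
import { initLoom } from "@openfluke/welvet";

const loom = await initLoom();

// Load a network first; the file name is illustrative.
const sourceJSON = await fetch("test.json").then((r) => r.json());
const network = loom.LoadModelFromString(JSON.stringify(sourceJSON), "my_model");

// Serialize: SaveModelToString returns a JSON-encoded array whose first
// element is the model JSON string, hence the nested JSON.parse.
const modelJSON = network.SaveModelToString(JSON.stringify(["my_model"]));
const model = JSON.parse(JSON.parse(modelJSON)[0]);

// Persist anywhere; localStorage is used here as in the README.
localStorage.setItem("my_model", JSON.stringify(model));

// Later: restore the complete network from the stored JSON in one call.
const stored = JSON.parse(localStorage.getItem("my_model")!);
const restored = loom.LoadModelFromString(JSON.stringify(stored), "my_model");
console.log("Restored:", restored != null);
```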
package/dist/index.js CHANGED
@@ -60,6 +60,7 @@ export async function initLoom(opts = {}) {
  const api = {
    NewNetwork: g.NewNetwork,
    LoadModelFromString: g.LoadModelFromString,
+   CallLayerInit: g.CallLayerInit, // Direct access to registry-based layer init
    // Layer initialization functions using CallLayerInit (registry-based)
    InitDenseLayer: (inputSize, outputSize, activation) => {
      return callLayerInit("InitDenseLayer", inputSize, outputSize, activation);
package/dist/loom.wasm CHANGED
Binary file
package/dist/types.d.ts CHANGED
@@ -40,6 +40,13 @@ export interface LoomAPI {
     * @returns LoomNetwork instance
     */
    LoadModelFromString: (modelJSON: string, modelID: string) => LoomNetwork;
+   /**
+    * Call any layer initialization function from the registry
+    * @param functionName - Name of the layer init function (e.g., "InitDenseLayer")
+    * @param paramsJSON - JSON string of parameters array
+    * @returns JSON string of layer configuration
+    */
+   CallLayerInit: (functionName: string, paramsJSON: string) => string;
    /**
     * Initialize a dense (fully-connected) layer configuration
     * @param inputSize - Input dimension
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@openfluke/welvet",
-   "version": "0.1.0",
+   "version": "0.1.2",
    "description": "TypeScript/JavaScript bindings for LOOM neural network framework with WebAssembly support - GPU-accelerated machine learning in the browser",
    "type": "module",
    "main": "./dist/index.js",