@buley/neural 3.0.0 → 4.0.0

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (2)
  1. package/dist/neural.js +539 -0
  2. package/package.json +2 -2
package/dist/neural.js ADDED
@@ -0,0 +1,539 @@
+ var b = Object.defineProperty;
+ var m = (c, e, i) => e in c ? b(c, e, { enumerable: !0, configurable: !0, writable: !0, value: i }) : c[e] = i;
+ var r = (c, e, i) => m(c, typeof e != "symbol" ? e + "" : e, i);
+ import { dash as h } from "@buley/dash";
+ async function y() {
+ console.log("Initializing Neural Schema..."), await h.execute(`
+ CREATE TABLE IF NOT EXISTS neurons (
+ id TEXT PRIMARY KEY,
+ type TEXT NOT NULL,
+ bias REAL DEFAULT 0.0,
+ activation TEXT DEFAULT 'tanh',
+ created_at INTEGER DEFAULT (unixepoch())
+ )
+ `), await h.execute(`
+ CREATE TABLE IF NOT EXISTS synapses (
+ id TEXT PRIMARY KEY,
+ from_id TEXT NOT NULL,
+ to_id TEXT NOT NULL,
+ weight REAL DEFAULT 0.0,
+ created_at INTEGER DEFAULT (unixepoch()),
+ FOREIGN KEY(from_id) REFERENCES neurons(id),
+ FOREIGN KEY(to_id) REFERENCES neurons(id)
+ )
+ `), console.log("Schema initialized.");
+ }
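The schema stores the graph relationally: neurons and synapses are plain SQL rows, so the network can be inspected and edited with ordinary queries. A minimal sketch of a round-trip through @buley/dash, assuming only the execute(sql, params) and ready() calls that this file itself uses, and that execute returns the selected rows (as the repositories below rely on):

    import { dash } from "@buley/dash";

    // Hypothetical round-trip against the schema above.
    await dash.ready();
    await dash.execute(
      "INSERT INTO neurons (id, type, bias, activation) VALUES (?, ?, ?, ?)",
      ["n-demo", "hidden", 0.1, "tanh"]
    );
    const rows = await dash.execute("SELECT id, bias FROM neurons");
    console.log(rows);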
+ class B {
+ async create(e) {
+ await h.execute(
+ "INSERT INTO neurons (id, type, bias, activation) VALUES (?, ?, ?, ?)",
+ [e.id, e.type, e.bias, e.activation]
+ );
+ }
+ // Feature: Add with semantic embedding
+ async createWithSemantics(e, i) {
+ await this.create(e), await h.addWithEmbedding(e.id, i);
+ }
+ async getAll() {
+ return await h.execute("SELECT * FROM neurons");
+ }
+ async delete(e) {
+ await h.execute("DELETE FROM neurons WHERE id = ?", [e]);
+ }
+ }
+ class _ {
+ async create(e) {
+ await h.execute(
+ "INSERT INTO synapses (id, from_id, to_id, weight) VALUES (?, ?, ?, ?)",
+ [e.id, e.from_id, e.to_id, e.weight]
+ );
+ }
+ async getAll() {
+ return await h.execute("SELECT * FROM synapses");
+ }
+ async delete(e) {
+ await h.execute("DELETE FROM synapses WHERE id = ?", [e]);
+ }
+ }
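Besides plain CRUD, the neuron repository can pair a row with a vector embedding via dash's addWithEmbedding, as createWithSemantics does above. A hedged sketch (the exported engine does not currently expose this path, and the shape of the second argument is an assumption; a description string is shown):

    import { dash } from "@buley/dash";

    const id = crypto.randomUUID();
    await dash.execute(
      "INSERT INTO neurons (id, type, bias, activation) VALUES (?, ?, ?, ?)",
      [id, "hidden", 0, "tanh"]
    );
    // Index the neuron for semantic lookup, mirroring createWithSemantics.
    await dash.addWithEmbedding(id, "motor neuron for left-hand control");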
+ const v = `
+ // Structure of our compute shader
+ // Group 0: Bindings for data
+ // Binding 0: Matrix W (Weights) - N x N flattened array
+ // Binding 1: Vector X (Current Neuron Values) - N length array
+ // Binding 2: Vector B (Biases) - N length array
+ // Binding 3: Vector Y (Output Neuron Values) - N length array
+ // Binding 4: Dimensions Uniform - Struct { size: u32 }
+
+ struct Dimensions {
+ size: u32,
+ }
+
+ @group(0) @binding(0) var<storage, read> weights: array<f32>;
+ @group(0) @binding(1) var<storage, read> input: array<f32>;
+ @group(0) @binding(2) var<storage, read> biases: array<f32>;
+ @group(0) @binding(3) var<storage, read_write> output: array<f32>;
+ @group(0) @binding(4) var<uniform> dims: Dimensions;
+
+ // Activation Functions
+ fn tanh_approx(x: f32) -> f32 {
+ let e2x = exp(2.0 * x);
+ return (e2x - 1.0) / (e2x + 1.0);
+ }
+
+ @compute @workgroup_size(64, 1, 1)
+ fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
+ let row = global_id.x;
+ let batch = global_id.z;
+ let size = dims.size;
+
+ if (row >= size) {
+ return;
+ }
+
+ // Dot product: Row of W * Vector X
+ var sum: f32 = 0.0;
+
+ // Batch offset for input/output
+ let batch_offset = batch * size;
+
+ for (var col: u32 = 0u; col < size; col = col + 1u) {
+ // W is shared (not batched): weights[row * size + col]
+ let w_idx = row * size + col;
+
+ // Input is batched: input[batch * size + col]
+ let input_idx = batch_offset + col;
+
+ sum = sum + (weights[w_idx] * input[input_idx]);
+ }
+
+ // Add Bias (Shared)
+ sum = sum + biases[row];
+
+ // Activation
+ // Output is batched: output[batch * size + row]
+ let out_idx = batch_offset + row;
+ output[out_idx] = tanh_approx(sum);
+ }
+ `, E = `struct Dimensions {
+ size: u32,
+ batchSize: u32,
+ }
+
+ struct TrainingParams {
+ learningRate: f32,
+ }
+
+ @group(0) @binding(0) var<storage, read_write> weights: array<f32>;
+ @group(0) @binding(1) var<storage, read> values: array<f32>; // Batched Activations (N * B)
+ @group(0) @binding(2) var<storage, read> biases: array<f32>;
+ @group(0) @binding(3) var<storage, read_write> deltas: array<f32>; // Batched Deltas (N * B)
+ @group(0) @binding(4) var<storage, read> targets: array<f32>; // Batched Targets
+ @group(0) @binding(5) var<uniform> dims: Dimensions;
+ @group(0) @binding(6) var<uniform> params: TrainingParams;
+
+ fn tanh_derivative(val: f32) -> f32 {
+ return 1.0 - (val * val);
+ }
+
+ // 1. Calculate Deltas (Backward Pass) - 3D Dispatched (64, 1, B)
+ @compute @workgroup_size(64, 1, 1)
+ fn calculate_deltas(@builtin(global_invocation_id) global_id: vec3<u32>) {
+ let index = global_id.x;
+ let batch = global_id.z;
+ let size = dims.size;
+
+ if (index >= size) { return; }
+
+ let batch_offset = batch * size;
+ let neuron_idx = batch_offset + index;
+
+ let activation = values[neuron_idx];
+ let derivative = tanh_derivative(activation);
+
+ var error_sum: f32 = 0.0;
+
+ // Backpropagate error from "Next Layer" (all other neurons k)
+ // For each k (destination), we need delta_k.
+ // delta_k is also batched! deltas[batch * size + k]
+ for (var k: u32 = 0u; k < size; k = k + 1u) {
+ // Weight FROM index TO k
+ let w_idx = k * size + index;
+ let weight_ki = weights[w_idx];
+
+ let delta_k_idx = batch_offset + k;
+ let delta_k = deltas[delta_k_idx];
+
+ error_sum = error_sum + (delta_k * weight_ki);
+ }
+
+ // Add immediate error (MSE derivative: y - t)
+ // targets[batch * size + index]
+ let target = targets[neuron_idx];
+ if (target > -998.0) {
+ error_sum = error_sum + (activation - target);
+ }
+
+ deltas[neuron_idx] = error_sum * derivative;
+ }
+
+ // 2. Update Weights (Optimizer Step) - 1D Dispatched (64, 1, 1) - Accumulates Gradients over Batch
+ @compute @workgroup_size(64)
+ fn update_weights(@builtin(global_invocation_id) global_id: vec3<u32>) {
+ let row = global_id.x; // Target neuron
+ let size = dims.size;
+ let batch_size = dims.batchSize;
+
+ if (row >= size) { return; }
+
+ let lr = params.learningRate;
+
+ // Update incoming weights to this neuron 'row'
+ // W_ji (row, col)
+ for (var col: u32 = 0u; col < size; col = col + 1u) {
+ let w_idx = row * size + col;
+
+ // Accumulate gradient over batch
+ var gradient_sum: f32 = 0.0;
+
+ for (var b: u32 = 0u; b < batch_size; b = b + 1u) {
+ let batch_offset = b * size;
+
+ // delta_j (for this batch item)
+ let delta_j = deltas[batch_offset + row];
+
+ // input_i (activation of source col for this batch item)
+ let input_val = values[batch_offset + col];
+
+ gradient_sum = gradient_sum + (delta_j * input_val);
+ }
+
+ // SGD update: average the accumulated gradient over the batch (the standard
+ // mean-gradient formulation). Using the raw sum instead would effectively
+ // scale the learning rate by batch_size.
+
+ let mean_gradient = gradient_sum / f32(batch_size);
+
+ weights[w_idx] = weights[w_idx] - (lr * mean_gradient);
+ }
+ }
+ `;
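Both kernels implement a fully connected recurrent step over a dense N x N matrix: per batch item, the forward pass computes y = tanh(Wx + b), and the delta rule adds the backpropagated term to the immediate MSE term, with target values at or below -998 acting as a "don't care" mask for unsupervised neurons. A plain JavaScript reference of the same index math, useful for checking GPU output on small networks (a sketch, not part of the package):

    // CPU reference for the forward kernel:
    // out[b*N + row] = tanh(sum_col W[row*N + col] * in[b*N + col] + bias[row])
    function forwardReference(weights, biases, input, size, batchSize) {
      const out = new Float32Array(size * batchSize);
      for (let b = 0; b < batchSize; b++) {
        for (let row = 0; row < size; row++) {
          let sum = biases[row];
          for (let col = 0; col < size; col++) {
            sum += weights[row * size + col] * input[b * size + col];
          }
          out[b * size + row] = Math.tanh(sum);
        }
      }
      return out;
    }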
+ class S {
+ constructor() {
+ r(this, "device", null);
+ r(this, "pipeline", null);
+ r(this, "bindGroup", null);
+ // Training Buffers
+ r(this, "deltaBuffer", null);
+ r(this, "targetBuffer", null);
+ r(this, "paramBuffer", null);
+ r(this, "trainingPipeline", null);
+ r(this, "deltaPipeline", null);
+ r(this, "trainingBindGroup", null);
+ // Buffers
+ r(this, "weightBuffer", null);
+ r(this, "inputBuffer", null);
+ r(this, "biasBuffer", null);
+ r(this, "outputBuffer", null);
+ r(this, "uniformBuffer", null);
+ r(this, "networkSize", 0);
+ r(this, "batchSize", 1);
+ r(this, "subscribers", []);
+ }
+ async init() {
+ if (!navigator.gpu) throw new Error("WebGPU not supported");
+ const e = await navigator.gpu.requestAdapter();
+ if (!e) throw new Error("No GPU adapter found");
+ this.device = await e.requestDevice();
+ const i = this.device.createShaderModule({ code: v }), t = this.device.createShaderModule({ code: E });
+ this.pipeline = this.device.createComputePipeline({
+ layout: "auto",
+ compute: { module: i, entryPoint: "main" }
+ }), this.trainingPipeline = this.device.createComputePipeline({
+ layout: "auto",
+ compute: { module: t, entryPoint: "update_weights" }
+ }), this.deltaPipeline = this.device.createComputePipeline({
+ layout: "auto",
+ compute: { module: t, entryPoint: "calculate_deltas" }
+ }), console.log("GPUEngine initialized");
+ }
+ // Prepare buffers based on network size (N) and Batch Size (B)
+ prepareBuffers(e, i, t, n = 1) {
+ if (!this.device || !this.pipeline) throw new Error("GPUEngine not initialized");
+ this.networkSize = e, this.batchSize = n, this.weightBuffer = this.createBuffer(i, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST), this.biasBuffer = this.createBuffer(t, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST);
+ const a = e * n;
+ this.inputBuffer = this.createBuffer(new Float32Array(a), GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST), this.outputBuffer = this.createBuffer(new Float32Array(a), GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST);
+ const s = new Uint32Array([e, n]);
+ this.uniformBuffer = this.createBuffer(s, GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST), this.bindGroup = this.device.createBindGroup({
+ layout: this.pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: this.weightBuffer } },
+ { binding: 1, resource: { buffer: this.inputBuffer } },
+ { binding: 2, resource: { buffer: this.biasBuffer } },
+ { binding: 3, resource: { buffer: this.outputBuffer } },
+ { binding: 4, resource: { buffer: this.uniformBuffer } }
+ ]
+ });
+ }
+ createBuffer(e, i) {
+ if (!this.device) throw new Error("Device null");
+ const t = this.device.createBuffer({
+ size: e.byteLength,
+ usage: i,
+ mappedAtCreation: !0
+ });
+ return e instanceof Float32Array ? new Float32Array(t.getMappedRange()).set(e) : new Uint32Array(t.getMappedRange()).set(e), t.unmap(), t;
+ }
+ async runTick(e) {
+ if (!this.device || !this.pipeline || !this.bindGroup || !this.inputBuffer || !this.outputBuffer)
+ throw new Error("GPU buffers not ready");
+ if (e.length !== this.networkSize * this.batchSize)
+ throw new Error(`Input size mismatch. Expected ${this.networkSize * this.batchSize}, got ${e.length}`);
+ this.device.queue.writeBuffer(this.inputBuffer, 0, e);
+ const i = this.device.createCommandEncoder(), t = i.beginComputePass();
+ t.setPipeline(this.pipeline), t.setBindGroup(0, this.bindGroup);
+ const a = Math.ceil(this.networkSize / 64);
+ t.dispatchWorkgroups(a, 1, this.batchSize), t.end();
+ const s = e.byteLength, o = this.device.createBuffer({
+ size: s,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
+ });
+ i.copyBufferToBuffer(this.outputBuffer, 0, o, 0, s);
+ const u = i.finish();
+ this.device.queue.submit([u]), await o.mapAsync(GPUMapMode.READ);
+ const d = new Float32Array(o.getMappedRange()), l = new Float32Array(d);
+ return o.unmap(), l;
+ }
+ prepareTrainingBuffers(e, i) {
+ if (!this.device || !this.trainingPipeline || !this.weightBuffer || !this.outputBuffer || !this.biasBuffer || !this.uniformBuffer)
+ throw new Error("GPU not ready for training");
+ if (e.length !== this.networkSize * this.batchSize)
+ throw new Error(`Target size mismatch. Expected ${this.networkSize * this.batchSize}, got ${e.length}`);
+ const t = this.networkSize * this.batchSize;
+ this.deltaBuffer = this.createBuffer(new Float32Array(t), GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC), this.targetBuffer = this.createBuffer(e, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST), this.paramBuffer = this.createBuffer(new Float32Array([i]), GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST), this.trainingBindGroup = this.device.createBindGroup({
+ layout: this.trainingPipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: this.weightBuffer } },
+ { binding: 1, resource: { buffer: this.outputBuffer } },
+ { binding: 2, resource: { buffer: this.biasBuffer } },
+ { binding: 3, resource: { buffer: this.deltaBuffer } },
+ { binding: 4, resource: { buffer: this.targetBuffer } },
+ { binding: 5, resource: { buffer: this.uniformBuffer } },
+ { binding: 6, resource: { buffer: this.paramBuffer } }
+ ]
+ });
+ }
+ subscribe(e) {
+ return this.subscribers.push(e), () => {
+ this.subscribers = this.subscribers.filter((i) => i !== e);
+ };
+ }
+ emit(e) {
+ this.subscribers.forEach((i) => i(e));
+ }
+ async train(e, i) {
+ var s;
+ const t = await this.runTick(e);
+ let n = 0;
+ for (let o = 0; o < t.length; o++) {
+ const u = i[o];
+ if (u > -998) {
+ const d = t[o] - u;
+ n += 0.5 * d * d;
+ }
+ }
+ const a = n / this.batchSize;
+ return this.emit({ type: "loss", value: a }), this.targetBuffer && ((s = this.device) == null || s.queue.writeBuffer(this.targetBuffer, 0, i)), await this.trainTick(), this.emit({ type: "epoch", value: 1 }), t;
+ }
+ async trainTick(e) {
+ if (!this.device || !this.trainingPipeline || !this.deltaPipeline || !this.trainingBindGroup || !this.deltaBuffer)
+ throw new Error("Training not ready");
+ e && e.length > 0 && this.device.queue.writeBuffer(this.deltaBuffer, 0, e);
+ const i = this.device.createCommandEncoder(), t = i.beginComputePass();
+ t.setPipeline(this.deltaPipeline), t.setBindGroup(0, this.trainingBindGroup);
+ const a = Math.ceil(this.networkSize / 64);
+ t.dispatchWorkgroups(a, 1, this.batchSize), t.end();
+ const s = i.beginComputePass();
+ s.setPipeline(this.trainingPipeline), s.setBindGroup(0, this.trainingBindGroup), s.dispatchWorkgroups(a, 1, 1), s.end(), this.device.queue.submit([i.finish()]);
+ }
+ async injectInput(e) {
+ !this.device || !this.inputBuffer || this.device.queue.writeBuffer(this.inputBuffer, 0, e);
+ }
+ }
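Taken together, class S drives the two pipelines: prepareBuffers uploads the flattened matrices once, runTick streams a batched input through the forward kernel and reads back activations via a staging buffer, and train runs forward, reports loss to subscribers, then dispatches calculate_deltas followed by update_weights. A hedged sketch of driving it directly, using the minified class name for reference (S is internal and unexported; the engine below normally wires this up):

    // Assumes a 3-neuron network, batch size 1.
    const gpu = new S();
    await gpu.init();
    const N = 3;
    gpu.prepareBuffers(N, new Float32Array(N * N), new Float32Array(N), 1);
    gpu.prepareTrainingBuffers(new Float32Array(N), 0.1);       // learning rate 0.1
    const unsubscribe = gpu.subscribe((ev) => console.log(ev)); // { type: "loss" | "epoch", value }
    const out = await gpu.runTick(new Float32Array([1, 0, 0])); // y = tanh(Wx + b)
    // Targets at or below -998 mark unsupervised neurons.
    await gpu.train(new Float32Array([1, 0, 0]), new Float32Array([-999, -999, 0.5]));
    unsubscribe();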
+ class T {
+ constructor() {
+ r(this, "context", null);
+ r(this, "builder", null);
+ r(this, "graph", null);
+ // Buffers/State
+ r(this, "networkSize", 0);
+ r(this, "batchSize", 1);
+ // We keep weights/biases in memory to rebuild graph if needed,
+ // though for strict WebNN we bake them into constants.
+ r(this, "weights", null);
+ r(this, "biases", null);
+ r(this, "isReady", !1);
+ }
+ async init() {
+ if (!navigator.ml) {
+ console.warn("WebNN: navigator.ml not supported");
+ return;
+ }
+ try {
+ this.context = await navigator.ml.createContext({ deviceType: "npu", powerPreference: "low-power" }), console.log("WebNN: NPU Context created"), this.builder = new window.MLGraphBuilder(this.context), this.isReady = !0;
+ } catch (e) {
+ console.error("WebNN Init Error (likely no NPU or flag disabled):", e);
+ }
+ }
+ async prepareModel(e, i, t, n = 1) {
+ if (!(!this.context || !this.builder)) {
+ this.networkSize = e, this.batchSize = n, this.weights = i, this.biases = t;
+ try {
+ const a = this.builder, s = {
+ dataType: "float32",
+ dimensions: [n, e]
+ }, o = a.input("input", s), u = {
+ dataType: "float32",
+ dimensions: [e, e]
+ }, d = a.constant(u, i), l = {
+ dataType: "float32",
+ dimensions: [e]
+ // 1D, will broadcast to [batch, size]
+ }, f = a.constant(l, t), p = a.matmul(o, d), g = a.add(p, f), w = a.tanh(g);
+ this.graph = await a.build({ output: w }), console.log("WebNN: Graph compiled successfully");
+ } catch (a) {
+ console.error("WebNN Build Error:", a);
+ }
+ }
+ }
+ async runTick(e) {
+ if (!this.context || !this.graph)
+ throw new Error("WebNN not ready");
+ if (e.length !== this.networkSize * this.batchSize)
+ throw new Error(`Input size mismatch. Expected ${this.networkSize * this.batchSize}, got ${e.length}`);
+ const i = new Float32Array(this.networkSize * this.batchSize), t = { input: e }, n = { output: i };
+ return await this.context.compute(this.graph, t, n), i;
+ }
+ }
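The WebNN path compiles the whole tick into one fused graph, tanh(input [B,N] x weights [N,N] + biases), built once and executed with context.compute. Note that matmul(input, weights) contracts over the rows of the weight constant, while the WGSL kernel contracts over its columns (weights[row * size + col]), so the two backends appear to agree only for a symmetric weight matrix or a transposed upload. A minimal availability probe mirroring init() above (a sketch; context options and NPU support vary by browser and flags):

    // Hypothetical probe for the WebNN backend used by class T.
    if ("ml" in navigator) {
      try {
        const ctx = await navigator.ml.createContext({ deviceType: "npu", powerPreference: "low-power" });
        console.log("WebNN context ready:", ctx);
      } catch (err) {
        console.warn("WebNN unavailable, falling back to WebGPU:", err);
      }
    }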
+ class z {
+ constructor() {
+ // Maps Neuron logical IDs (UUIDs) to Matrix Indices (0...N)
+ r(this, "idToIndex", /* @__PURE__ */ new Map());
+ r(this, "indexToId", []);
+ }
+ // Converts Graph -> Dense Matrices
+ flatten(e, i) {
+ const t = e.length;
+ this.idToIndex.clear(), this.indexToId = new Array(t), e.forEach((o, u) => {
+ this.idToIndex.set(o.id, u), this.indexToId[u] = o.id;
+ });
+ const n = new Float32Array(t), a = new Float32Array(t);
+ e.forEach((o, u) => {
+ n[u] = o.bias;
+ });
+ const s = new Float32Array(t * t);
+ return i.forEach((o) => {
+ const u = this.idToIndex.get(o.from_id), d = this.idToIndex.get(o.to_id);
+ if (u !== void 0 && d !== void 0) {
+ const l = d * t + u;
+ s[l] = o.weight;
+ }
+ }), { size: t, weights: s, biases: n, initialValues: a };
+ }
+ }
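The translator's dense layout is weights[to * N + from], which is exactly the weights[row * size + col] indexing the forward kernel expects (row = destination, col = source). A worked example on a 2-neuron graph, using the internal class name for reference:

    const translator = new z();
    const { size, weights } = translator.flatten(
      [{ id: "a", bias: 0 }, { id: "b", bias: 0.5 }],
      [{ id: "s1", from_id: "a", to_id: "b", weight: 0.8 }]
    );
    // size === 2; the a -> b edge lands at weights[to * size + from] = weights[1 * 2 + 0]
    console.log(weights[2]); // 0.8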
+ class U {
+ constructor() {
+ r(this, "gpu");
+ r(this, "npu");
+ r(this, "neuronRepo");
+ r(this, "synapseRepo");
+ r(this, "translator");
+ r(this, "activeBackend", "gpu");
+ // Cache
+ r(this, "neurons", []);
+ r(this, "synapses", []);
+ this.gpu = new S(), this.npu = new T(), this.neuronRepo = new B(), this.synapseRepo = new _(), this.translator = new z();
+ }
+ async init() {
+ if (console.log("Neural 2.0 Engine Initializing..."), await h.ready(), await y(), await this.gpu.init(), this.gpu.batchSize = 2, await this.npu.init(), this.npu.isReady && (console.log("Neural Engine: NPU Accelerated Backend Available."), this.activeBackend = "npu"), this.neurons = await this.neuronRepo.getAll(), this.synapses = await this.synapseRepo.getAll(), this.neurons.length === 0) {
+ console.log("Seeding test network...");
+ const e = "n1-" + crypto.randomUUID(), i = "n2-" + crypto.randomUUID();
+ await this.neuronRepo.create({ id: e, type: "input", bias: 0, activation: "tanh" }), await this.neuronRepo.create({ id: i, type: "output", bias: 0.5, activation: "tanh" }), await this.synapseRepo.create({ id: crypto.randomUUID(), from_id: e, to_id: i, weight: 0.8 });
+ for (let n = 0; n < 50; n++)
+ await this.neuronRepo.create({ id: `auto-${n}`, type: "hidden", bias: 0, activation: "tanh" });
+ const t = await this.neuronRepo.getAll();
+ for (let n = 0; n < 50; n++) {
+ const a = t[Math.floor(Math.random() * t.length)].id, s = t[Math.floor(Math.random() * t.length)].id;
+ a !== s && await this.synapseRepo.create({ id: crypto.randomUUID(), from_id: a, to_id: s, weight: Math.random() });
+ }
+ this.neurons = await this.neuronRepo.getAll(), this.synapses = await this.synapseRepo.getAll();
+ }
+ return await this.compile(), console.log(`Engine Ready. Active Backend: ${this.activeBackend.toUpperCase()}`), this.getGraphData();
+ }
+ async compile() {
+ console.log(`Compiling graph: ${this.neurons.length} neurons, ${this.synapses.length} synapses`);
+ const e = this.translator.flatten(this.neurons, this.synapses);
+ return this.gpu.prepareBuffers(e.size, e.weights, e.biases, this.gpu.batchSize), this.gpu.prepareTrainingBuffers(new Float32Array(e.size * this.gpu.batchSize), 0.1), this.npu.isReady && await this.npu.prepareModel(e.size, e.weights, e.biases, 2), e;
+ }
+ async deployToCloud() {
+ console.log("Deploying heavy layers to Hybrid Cloud..."), this.neurons = this.neurons.map((e) => ({
+ ...e,
+ type: Math.random() > 0.8 ? "cloud" : e.type
+ }));
+ for (const e of this.neurons)
+ e.type;
+ return this.getGraphData();
+ }
+ getGraphData() {
+ const e = /* @__PURE__ */ new Map();
+ this.neurons.forEach((n, a) => e.set(n.id, a));
+ const i = this.synapses.map((n) => ({
+ id: n.id,
+ source: e.get(n.from_id) || 0,
+ target: e.get(n.to_id) || 0,
+ weight: n.weight
+ })), t = this.neurons.map((n, a) => ({
+ id: n.id,
+ index: a,
+ type: n.type
+ }));
+ return {
+ nodeCount: this.neurons.length,
+ nodes: t,
+ edges: i
+ };
+ }
+ async deleteSynapse(e) {
+ return console.log(`Lesioning synapse: ${e}`), await this.synapseRepo.delete(e), this.synapses = this.synapses.filter((i) => i.id !== e), await this.compile(), this.getGraphData();
+ }
+ exportGraph() {
+ return {
+ version: "2.0",
+ neurons: this.neurons,
+ synapses: this.synapses
+ };
+ }
+ async importGraph(e) {
+ if (!e.neurons || !e.synapses) throw new Error("Invalid graph data");
+ console.log("Importing graph...");
+ const i = await this.neuronRepo.getAll();
+ for (const n of i) await this.neuronRepo.delete(n.id);
+ const t = await this.synapseRepo.getAll();
+ for (const n of t) await this.synapseRepo.delete(n.id);
+ for (const n of e.neurons) await this.neuronRepo.create(n);
+ for (const n of e.synapses) await this.synapseRepo.create(n);
+ return this.neurons = await this.neuronRepo.getAll(), this.synapses = await this.synapseRepo.getAll(), console.log(`Compiling imported graph: ${this.neurons.length} neurons, ${this.synapses.length} synapses`), await this.compile(), this.getGraphData();
+ }
+ async injectInput(e) {
+ this.activeBackend === "npu" && this.npu.isReady || await this.gpu.injectInput(e);
+ }
+ async runTick(e) {
+ return this.activeBackend === "npu" && this.npu.isReady ? this.npu.runTick(e) : this.gpu.runTick(e);
+ }
+ }
+ async function P() {
+ return new U().init();
+ }
+ export {
+ U as NeuralEngine,
+ P as init
+ };
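The public surface is just NeuralEngine (class U) and init (P). On first run the engine seeds a small random network, compiles it to dense matrices, and hard-codes a batch size of 2. A hedged end-to-end sketch, assuming the published entry point resolves to this module (input length is nodeCount x 2 because of that batch size):

    import { NeuralEngine } from "@buley/neural";

    const engine = new NeuralEngine();
    const graph = await engine.init();          // schema + seed + compile
    console.log(graph.nodeCount, graph.edges.length);

    // One forward tick over both batch slots (batch size is fixed at 2 in init()).
    const input = new Float32Array(graph.nodeCount * 2);
    input[0] = 1;
    const activations = await engine.runTick(input);
    console.log(activations.slice(0, 5));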
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@buley/neural",
- "version": "3.0.0",
+ "version": "4.0.0",
  "description": "A Transparent, Local-First, WebGPU-Accelerated Neural Graph Database.",
  "type": "module",
  "main": "./src/index.ts",
@@ -10,7 +10,7 @@
  "bench": "bun src/bench/benchmark.ts"
  },
  "dependencies": {
- "@buley/dash": "2.1.4"
+ "@buley/dash": "4.0.0"
  },
  "devDependencies": {
  "@webgpu/types": "^0.1.69",