@buley/neural 2.0.4 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/engine/webnn.ts +118 -0
- package/src/index.ts +42 -18
- package/src/webnn-types.d.ts +43 -0
package/src/engine/webnn.ts
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
|
|
2
|
+
/// <reference path="../webnn-types.d.ts" />
|
|
3
|
+
|
|
4
|
+
export class WebNNEngine {
|
|
5
|
+
context: MLContext | null = null;
|
|
6
|
+
builder: MLGraphBuilder | null = null;
|
|
7
|
+
graph: MLGraph | null = null;
|
|
8
|
+
|
|
9
|
+
// Buffers/State
|
|
10
|
+
networkSize: number = 0;
|
|
11
|
+
batchSize: number = 1;
|
|
12
|
+
|
|
13
|
+
// We keep weights/biases in memory to rebuild graph if needed,
|
|
14
|
+
// though for strict WebNN we bakw them into constants.
|
|
15
|
+
weights: Float32Array | null = null;
|
|
16
|
+
biases: Float32Array | null = null;
|
|
17
|
+
|
|
18
|
+
isReady = false;
|
|
19
|
+
|
|
20
|
+
async init() {
|
|
21
|
+
if (!navigator.ml) {
|
|
22
|
+
console.warn("WebNN: navigator.ml not supported");
|
|
23
|
+
return;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
try {
|
|
27
|
+
// Prefer NPU, fallback to GPU if NPU not available, though we really want NPU for "cool factor"
|
|
28
|
+
// Note: browser support for 'npu' deviceType is bleeding edge.
|
|
29
|
+
this.context = await navigator.ml.createContext({ deviceType: 'npu', powerPreference: 'low-power' });
|
|
30
|
+
console.log("WebNN: NPU Context created");
|
|
31
|
+
|
|
32
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
33
|
+
this.builder = new (window as any).MLGraphBuilder(this.context);
|
|
34
|
+
this.isReady = true;
|
|
35
|
+
} catch (e) {
|
|
36
|
+
console.error("WebNN Init Error (likely no NPU or flag disabled):", e);
|
|
37
|
+
// Fallback? or just stay uninitialized
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
async prepareModel(size: number, weights: Float32Array, biases: Float32Array, batchSize: number = 1) {
|
|
42
|
+
if (!this.context || !this.builder) return;
|
|
43
|
+
this.networkSize = size;
|
|
44
|
+
this.batchSize = batchSize;
|
|
45
|
+
this.weights = weights;
|
|
46
|
+
this.biases = biases;
|
|
47
|
+
|
|
48
|
+
// Build the Computational Graph
|
|
49
|
+
// Model: Y = Tanh((X * W) + B)
|
|
50
|
+
// Shapes:
|
|
51
|
+
// X: [batchSize, networkSize]
|
|
52
|
+
// W: [networkSize, networkSize]
|
|
53
|
+
// B: [networkSize] (Broadcasted)
|
|
54
|
+
// Y: [batchSize, networkSize]
|
|
55
|
+
|
|
56
|
+
try {
|
|
57
|
+
const builder = this.builder;
|
|
58
|
+
|
|
59
|
+
// 1. Input Operand (Variable)
|
|
60
|
+
const inputDesc: MLOperandDescriptor = {
|
|
61
|
+
dataType: 'float32',
|
|
62
|
+
dimensions: [batchSize, size]
|
|
63
|
+
};
|
|
64
|
+
const input = builder.input('input', inputDesc);
|
|
65
|
+
|
|
66
|
+
// 2. Constants (Weights & Biases)
|
|
67
|
+
// Note: WebNN matmul expects typically [I, J] * [J, K] -> [I, K]
|
|
68
|
+
// Our weights are N*N flattened.
|
|
69
|
+
const weightDesc: MLOperandDescriptor = {
|
|
70
|
+
dataType: 'float32',
|
|
71
|
+
dimensions: [size, size]
|
|
72
|
+
};
|
|
73
|
+
// WebNN might require specific buffer types, Float32Array is good.
|
|
74
|
+
const weightConstant = builder.constant(weightDesc, weights);
|
|
75
|
+
|
|
76
|
+
const biasDesc: MLOperandDescriptor = {
|
|
77
|
+
dataType: 'float32',
|
|
78
|
+
dimensions: [size] // 1D, will broadcast to [batch, size]
|
|
79
|
+
};
|
|
80
|
+
const biasConstant = builder.constant(biasDesc, biases);
|
|
81
|
+
|
|
82
|
+
// 3. Operations
|
|
83
|
+
// MatMul: [batch, size] * [size, size] -> [batch, size]
|
|
84
|
+
const matmul = builder.matmul(input, weightConstant);
|
|
85
|
+
|
|
86
|
+
// Add Bias (Broadcast)
|
|
87
|
+
const added = builder.add(matmul, biasConstant);
|
|
88
|
+
|
|
89
|
+
// Activation
|
|
90
|
+
const output = builder.tanh(added);
|
|
91
|
+
|
|
92
|
+
// 4. Build
|
|
93
|
+
this.graph = await builder.build({ 'output': output });
|
|
94
|
+
console.log("WebNN: Graph compiled successfully");
|
|
95
|
+
|
|
96
|
+
} catch (e) {
|
|
97
|
+
console.error("WebNN Build Error:", e);
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
async runTick(inputs: Float32Array): Promise<Float32Array> {
|
|
102
|
+
if (!this.context || !this.graph) {
|
|
103
|
+
throw new Error("WebNN not ready");
|
|
104
|
+
}
|
|
105
|
+
if (inputs.length !== this.networkSize * this.batchSize) {
|
|
106
|
+
throw new Error(`Input size mismatch. Expected ${this.networkSize * this.batchSize}, got ${inputs.length}`);
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
const outputs = new Float32Array(this.networkSize * this.batchSize);
|
|
110
|
+
|
|
111
|
+
const inputsMap = { 'input': inputs };
|
|
112
|
+
const outputsMap = { 'output': outputs };
|
|
113
|
+
|
|
114
|
+
await this.context.compute(this.graph, inputsMap, outputsMap);
|
|
115
|
+
|
|
116
|
+
return outputs;
|
|
117
|
+
}
|
|
118
|
+
}
|
package/src/index.ts
CHANGED
|
@@ -2,6 +2,7 @@ import { dash } from "@buley/dash";
|
|
|
2
2
|
import { initializeSchema } from "./db/schema";
|
|
3
3
|
import { NeuronRepository, SynapseRepository } from "./db/repository";
|
|
4
4
|
import { GPUEngine } from "./engine/gpu";
|
|
5
|
+
import { WebNNEngine } from "./engine/webnn";
|
|
5
6
|
import { Translator } from "./engine/translator";
|
|
6
7
|
import { Neuron, Synapse } from "./types";
|
|
7
8
|
|
|
@@ -9,12 +10,16 @@ export type { Neuron, Synapse } from "./types";
|
|
|
9
10
|
|
|
10
11
|
export class NeuralEngine {
|
|
11
12
|
gpu: GPUEngine;
|
|
13
|
+
npu: WebNNEngine;
|
|
12
14
|
neuronRepo: NeuronRepository;
|
|
13
15
|
synapseRepo: SynapseRepository;
|
|
14
16
|
private translator: Translator;
|
|
15
17
|
|
|
18
|
+
activeBackend: 'gpu' | 'npu' = 'gpu';
|
|
19
|
+
|
|
16
20
|
constructor() {
|
|
17
21
|
this.gpu = new GPUEngine();
|
|
22
|
+
this.npu = new WebNNEngine();
|
|
18
23
|
this.neuronRepo = new NeuronRepository();
|
|
19
24
|
this.synapseRepo = new SynapseRepository();
|
|
20
25
|
this.translator = new Translator();
|
|
@@ -35,6 +40,13 @@ export class NeuralEngine {
|
|
|
35
40
|
await this.gpu.init();
|
|
36
41
|
this.gpu.batchSize = 2; // Default to mini-batch of 2 for demo
|
|
37
42
|
|
|
43
|
+
// Try NPU
|
|
44
|
+
await this.npu.init();
|
|
45
|
+
if (this.npu.isReady) {
|
|
46
|
+
console.log("Neural Engine: NPU Accelerated Backend Available.");
|
|
47
|
+
this.activeBackend = 'npu';
|
|
48
|
+
}
|
|
49
|
+
|
|
38
50
|
// 3. Hydration
|
|
39
51
|
this.neurons = await this.neuronRepo.getAll();
|
|
40
52
|
this.synapses = await this.synapseRepo.getAll();
|
|
@@ -64,17 +76,27 @@ export class NeuralEngine {
|
|
|
64
76
|
this.synapses = await this.synapseRepo.getAll();
|
|
65
77
|
}
|
|
66
78
|
|
|
67
|
-
// 4. Compile to
|
|
79
|
+
// 4. Compile to Compute Backends
|
|
80
|
+
await this.compile();
|
|
81
|
+
|
|
82
|
+
console.log(`Engine Ready. Active Backend: ${this.activeBackend.toUpperCase()}`);
|
|
83
|
+
return this.getGraphData();
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
async compile() {
  console.log(`Compiling graph: ${this.neurons.length} neurons, ${this.synapses.length} synapses`);

  // Flatten the object graph into dense weight/bias buffers.
  const flattened = this.translator.flatten(this.neurons, this.synapses);
  const batch = this.gpu.batchSize;

  // GPU backend: compute buffers plus zero-initialized training targets.
  this.gpu.prepareBuffers(flattened.size, flattened.weights, flattened.biases, batch);
  this.gpu.prepareTrainingBuffers(new Float32Array(flattened.size * batch), 0.1);

  // NPU backend — only when WebNN initialized successfully.
  if (this.npu.isReady) {
    await this.npu.prepareModel(flattened.size, flattened.weights, flattened.biases, 2);
  }

  // Hand the flattened data back for init/import flows that need it.
  return flattened;
}
|
|
79
101
|
|
|
80
102
|
async deployToCloud() {
|
|
@@ -131,12 +153,7 @@ export class NeuralEngine {
|
|
|
131
153
|
this.synapses = this.synapses.filter(s => s.id !== id);
|
|
132
154
|
|
|
133
155
|
// Recompile (Heavy!)
|
|
134
|
-
|
|
135
|
-
// But for "The Visible Brain" seeing it disappear is cooler.
|
|
136
|
-
const data = this.translator.flatten(this.neurons, this.synapses);
|
|
137
|
-
this.gpu.prepareBuffers(data.size, data.weights, data.biases, this.gpu.batchSize);
|
|
138
|
-
// Reset training buffers too to be safe/simple
|
|
139
|
-
this.gpu.prepareTrainingBuffers(new Float32Array(data.size * this.gpu.batchSize), 0.1);
|
|
156
|
+
await this.compile();
|
|
140
157
|
|
|
141
158
|
return this.getGraphData();
|
|
142
159
|
}
|
|
@@ -170,18 +187,25 @@ export class NeuralEngine {
|
|
|
170
187
|
this.synapses = await this.synapseRepo.getAll();
|
|
171
188
|
|
|
172
189
|
console.log(`Compiling imported graph: ${this.neurons.length} neurons, ${this.synapses.length} synapses`);
|
|
173
|
-
|
|
174
|
-
this.gpu.prepareBuffers(graph.size, graph.weights, graph.biases, this.gpu.batchSize);
|
|
175
|
-
this.gpu.prepareTrainingBuffers(new Float32Array(graph.size * this.gpu.batchSize), 0.1);
|
|
190
|
+
await this.compile();
|
|
176
191
|
|
|
177
192
|
return this.getGraphData();
|
|
178
193
|
}
|
|
179
194
|
|
|
180
195
|
async injectInput(data: Float32Array) {
  // The WebNN backend consumes its input directly in runTick, so there is
  // nothing to stage here; only the GPU path needs an explicit upload.
  const npuActive = this.activeBackend === 'npu' && this.npu.isReady;
  if (!npuActive) {
    await this.gpu.injectInput(data);
  }
}
|
|
202
|
+
|
|
203
|
+
async runTick(inputs: Float32Array): Promise<Float32Array> {
|
|
204
|
+
if (this.activeBackend === 'npu' && this.npu.isReady) {
|
|
205
|
+
return this.npu.runTick(inputs);
|
|
206
|
+
} else {
|
|
207
|
+
return this.gpu.runTick(inputs);
|
|
208
|
+
}
|
|
185
209
|
}
|
|
186
210
|
}
|
|
187
211
|
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
|
|
2
|
+
// Minimal WebNN Type Definitions for TypeScript
|
|
3
|
+
// Based on W3C Web Neural Network API Draft
|
|
4
|
+
|
|
5
|
+
interface MLContext {
|
|
6
|
+
compute(graph: MLGraph, inputs: Record<string, ArrayBufferView>, outputs: Record<string, ArrayBufferView>): Promise<MLComputeResult>;
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
interface MLComputeResult {
|
|
10
|
+
inputs: Record<string, ArrayBufferView>;
|
|
11
|
+
outputs: Record<string, ArrayBufferView>;
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
interface MLGraphBuilder {
|
|
15
|
+
input(name: string, descriptor: MLOperandDescriptor): MLOperand;
|
|
16
|
+
constant(descriptor: MLOperandDescriptor, buffer: ArrayBufferView): MLOperand;
|
|
17
|
+
matmul(a: MLOperand, b: MLOperand): MLOperand;
|
|
18
|
+
add(a: MLOperand, b: MLOperand): MLOperand;
|
|
19
|
+
tanh(x: MLOperand): MLOperand;
|
|
20
|
+
build(outputs: Record<string, MLOperand>): Promise<MLGraph>;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
interface MLGraph {}
|
|
24
|
+
|
|
25
|
+
interface MLOperand {}
|
|
26
|
+
|
|
27
|
+
interface MLOperandDescriptor {
|
|
28
|
+
dataType: 'float32' | 'float16' | 'int32' | 'uint32' | 'int8' | 'uint8';
|
|
29
|
+
dimensions: number[];
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
interface MLContextOptions {
|
|
33
|
+
deviceType?: 'cpu' | 'gpu' | 'npu';
|
|
34
|
+
powerPreference?: 'default' | 'high-performance' | 'low-power';
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
interface NavigatorML {
|
|
38
|
+
createContext(options?: MLContextOptions): Promise<MLContext>;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
interface Navigator {
|
|
42
|
+
ml?: NavigatorML;
|
|
43
|
+
}
|