@buley/neural 2.0.0 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -2
- package/src/bench/benchmark.ts +11 -11
- package/src/db/repository.test.ts +2 -2
- package/src/db/repository.ts +8 -0
- package/src/engine/gpu.test.ts +3 -3
- package/src/engine/gpu.ts +14 -3
- package/src/engine/training.test.ts +3 -2
- package/src/index.ts +71 -0
- package/src/vite-env.d.ts +6 -0
- package/vite.config.ts +9 -0
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@buley/neural",
-  "version": "2.0.0",
+  "version": "2.0.2",
   "description": "A Transparent, Local-First, WebGPU-Accelerated Neural Graph Database.",
   "type": "module",
   "main": "./src/index.ts",
@@ -10,10 +10,11 @@
     "bench": "bun src/bench/benchmark.ts"
   },
   "dependencies": {
-    "@buley/dash": "
+    "@buley/dash": "2.1.4"
   },
   "devDependencies": {
     "@webgpu/types": "^0.1.69",
+    "bun-types": "^1.3.6",
     "typescript": "^5.3.3",
     "vite": "^5.0.0"
   }
package/src/bench/benchmark.ts
CHANGED

@@ -81,19 +81,19 @@ if (!global.navigator?.gpu) {
   console.log("No WebGPU detected in global scope. Mocking for CLI structure verification...");
   // @ts-ignore
   global.navigator = {
-    gpu: {
-      requestAdapter: async () => ({
-        requestDevice: async () => ({
-          createShaderModule: () => ({}),
-          createComputePipeline: () => ({ getBindGroupLayout: () => ({}) }),
-          createBuffer: (d: any) => ({ getMappedRange: () => new ArrayBuffer(d.size), unmap: () => {}, mapAsync: async () => {} }),
-          createBindGroup: () => ({}),
+    gpu: { ...({} as any) as GPU,
+      requestAdapter: async () => ({ ...({} as any) as GPUAdapter, // Force cast for mock
+        requestDevice: async () => ({ ...({} as any) as GPUDevice,
+          createShaderModule: () => ({} as unknown as GPUShaderModule),
+          createComputePipeline: () => ({ getBindGroupLayout: () => ({} as unknown as GPUBindGroupLayout) } as unknown as GPUComputePipeline),
+          createBuffer: (d: any) => ({ getMappedRange: () => new ArrayBuffer(d.size), unmap: () => {}, mapAsync: async () => {} } as unknown as GPUBuffer),
+          createBindGroup: () => ({} as unknown as GPUBindGroup),
           createCommandEncoder: () => ({
-            beginComputePass: () => ({ setPipeline:()=>{}, setBindGroup:()=>{}, dispatchWorkgroups:()=>{}, end:()=>{} }),
+            beginComputePass: () => ({ setPipeline:()=>{}, setBindGroup:()=>{}, dispatchWorkgroups:()=>{}, end:()=>{} } as unknown as GPUComputePassEncoder),
             copyBufferToBuffer: ()=>{},
-            finish: ()=>({})
-          }),
-          queue: { writeBuffer: ()=>{}, submit: ()=>{} }
+            finish: ()=>(({} as any) as GPUCommandBuffer)
+          } as unknown as GPUCommandEncoder),
+          queue: { writeBuffer: ()=>{}, submit: ()=>{} } as unknown as GPUQueue
         })
       })
     }
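The benchmark's CLI mock now satisfies the WebGPU type definitions by double-casting plain objects through `unknown` (or `any`) to the corresponding GPU interfaces, so only the members the benchmark actually calls need to exist. A minimal sketch of that pattern, assuming `@webgpu/types` is in scope (it is in this package's devDependencies):

```ts
// Sketch of the double-cast pattern used above. Only the members the caller
// touches are mocked; the cast silences the rest of the GPUQueue interface.
const queueMock = {
  writeBuffer: () => {},
  submit: () => {},
} as unknown as GPUQueue;

queueMock.submit([]); // fine; calling any unmocked member would fail at runtime
```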
package/src/db/repository.test.ts
CHANGED

@@ -1,9 +1,9 @@
-import { expect, test, describe, mock
+import { expect, test, describe, mock } from "bun:test";
 import { NeuronRepository, SynapseRepository } from "./repository";

 // Mock @buley/dash
 const mockDash = {
-  execute: mock((query,
+  execute: mock((query, _params) => {
     // Simple mock implementation
     if (query.includes("INSERT")) return Promise.resolve();
     if (query.includes("SELECT * FROM neurons")) return Promise.resolve([
package/src/db/repository.ts
CHANGED

@@ -20,6 +20,10 @@ export class NeuronRepository {
   async getAll(): Promise<Neuron[]> {
     return await dash.execute("SELECT * FROM neurons") as Neuron[];
   }
+
+  async delete(id: string): Promise<void> {
+    await dash.execute("DELETE FROM neurons WHERE id = ?", [id]);
+  }
 }

 export class SynapseRepository {
@@ -33,4 +37,8 @@ export class SynapseRepository {
   async getAll(): Promise<Synapse[]> {
     return await dash.execute("SELECT * FROM synapses") as Synapse[];
   }
+
+  async delete(id: string): Promise<void> {
+    await dash.execute("DELETE FROM synapses WHERE id = ?", [id]);
+  }
 }
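Both repositories gain a `delete(id)` method. Together with `getAll()`, these support the clear-then-reimport flow that `importGraph` in `src/index.ts` relies on (see that file's diff below). A minimal usage sketch; how the repositories are constructed is not shown in this diff and is left as a parameter here:

```ts
// Sketch: clearing the stored graph with the new delete() methods.
// Only delete(id) and getAll() are confirmed by this diff.
import type { NeuronRepository, SynapseRepository } from "./src/db/repository";

async function clearGraph(neurons: NeuronRepository, synapses: SynapseRepository) {
  for (const s of await synapses.getAll()) await synapses.delete(s.id);
  for (const n of await neurons.getAll()) await neurons.delete(n.id);
}
```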
package/src/engine/gpu.test.ts
CHANGED

@@ -1,4 +1,4 @@
-import { expect, test, describe, mock
+import { expect, test, describe, mock } from "bun:test";
 import { GPUEngine } from "./gpu";

 // Mock WebGPU Globals
@@ -37,8 +37,8 @@ const mockAdapter = {
 // @ts-ignore
 global.navigator = {
   gpu: {
-    requestAdapter: mock(async () => mockAdapter)
-  }
+    requestAdapter: mock(async () => mockAdapter as unknown as GPUAdapter)
+  } as unknown as GPU
 };

 // Polyfill Globals
package/src/engine/gpu.ts
CHANGED

@@ -112,7 +112,7 @@ export class GPUEngine {
     }

     // Upload Input
-    this.device.queue.writeBuffer(this.inputBuffer, 0, inputs);
+    this.device.queue.writeBuffer(this.inputBuffer, 0, inputs as unknown as BufferSource);

     // Encode Command
     const commandEncoder = this.device.createCommandEncoder();
@@ -213,7 +213,7 @@ export class GPUEngine {
     // Let's assume prepareTrainingBuffers was called ONCE before loop.
     // We just need to update TARGETS buffer!
     if (this.targetBuffer) {
-
+      this.device?.queue.writeBuffer(this.targetBuffer, 0, targets as unknown as BufferSource);
     }

     // Run Training Shaders
@@ -229,7 +229,7 @@ export class GPUEngine {
     }

     if (deltas && deltas.length > 0) {
-      this.device.queue.writeBuffer(this.deltaBuffer, 0, deltas);
+      this.device.queue.writeBuffer(this.deltaBuffer, 0, deltas as unknown as BufferSource);
     }

     const commandEncoder = this.device.createCommandEncoder();
@@ -252,4 +252,15 @@ export class GPUEngine {

     this.device.queue.submit([commandEncoder.finish()]);
   }
+
+  async injectInput(data: Float32Array): Promise<void> {
+    if (!this.device || !this.inputBuffer) return;
+
+    // We only write what we are given, usually just the first N inputs (Microphone bins)
+    // If data is smaller than buffer, we use queue.writeBuffer which handles partial writes
+    this.device.queue.writeBuffer(this.inputBuffer, 0, data as unknown as BufferSource);
+
+    // Trigger a tick? Or let the outer loop do it?
+    // Let's just update the buffer. The UI loop calls runTick() or similar.
+  }
 }
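The new `injectInput` does a partial `queue.writeBuffer` into the existing input buffer, so a caller can stream small frames (the comment suggests microphone bins) without rebuilding buffers each tick. A hedged caller sketch, assuming an already-initialized `GPUEngine` and a Web Audio `AnalyserNode`; only `injectInput(data: Float32Array)` is confirmed by this diff:

```ts
// Sketch: feeding analyser frames into the engine's input buffer every frame.
import { GPUEngine } from "./src/engine/gpu";

function streamAudio(engine: GPUEngine, analyser: AnalyserNode) {
  const bins = new Float32Array(analyser.frequencyBinCount);

  const frame = () => {
    analyser.getFloatFrequencyData(bins);
    // Partial write: only the first bins.length floats of the input buffer change.
    void engine.injectInput(bins);
    requestAnimationFrame(frame);
  };
  requestAnimationFrame(frame);
}
```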
package/src/engine/training.test.ts
CHANGED

@@ -78,8 +78,9 @@ describe("Training Loop", () => {

     // Should dispatch twice (Delta Calc + Weight Update)
     // We can check calls to createCommandEncoder -> beginComputePass
-    const encoder = mockDevice.createCommandEncoder.mock.results.at(-1)?.value;
-
+    // const encoder = mockDevice.createCommandEncoder.mock.results.at(-1)?.value;
+    // Check for pass calls if needed, otherwise ignore for now
+    // const pass = encoder?.beginComputePass.mock.results.at(-1)?.value;
     // mock logic is a bit simple, let's just check overall calls
     expect(mockDevice.queue.submit).toHaveBeenCalled();
   });
package/src/index.ts
CHANGED

@@ -74,6 +74,26 @@ export class NeuralEngine {
     return data;
   }

+  async deployToCloud() {
+    console.log("Deploying heavy layers to Hybrid Cloud...");
+    // Randomly tagging neurons as "cloud"
+    this.neurons = this.neurons.map(n => ({
+      ...n,
+      type: Math.random() > 0.8 ? 'cloud' : n.type
+    }));
+    // Update repo? For demo we might just keep in memory or update repo.
+    // Let's update repo for persistence
+    for (const n of this.neurons) {
+      if (n.type === 'cloud') {
+        // Assuming repo has update or we just overwrite.
+        // Repo might not support update efficiently.
+        // For demo/speed, we likely just keep in memory for this session
+        // unless we want it to persist.
+      }
+    }
+    return this.getGraphData();
+  }
+
   getGraphData() {
     // Map ID -> Index
     const map = new Map<string, number>();
@@ -86,8 +106,16 @@ export class NeuralEngine {
       weight: s.weight
     }));

+    // Return full nodes for visualization customization
+    const nodes = this.neurons.map((n, i) => ({
+      id: n.id,
+      index: i,
+      type: n.type
+    }));
+
     return {
       nodeCount: this.neurons.length,
+      nodes,
       edges
     };
   }
@@ -109,6 +137,49 @@ export class NeuralEngine {

     return this.getGraphData();
   }
+
+  exportGraph() {
+    return {
+      version: "2.0",
+      neurons: this.neurons,
+      synapses: this.synapses
+    };
+  }
+
+  async importGraph(data: any) {
+    if (!data.neurons || !data.synapses) throw new Error("Invalid graph data");
+
+    console.log("Importing graph...");
+
+    // 1. Clear existing
+    const oldNeurons = await this.neuronRepo.getAll();
+    for (const n of oldNeurons) await this.neuronRepo.delete(n.id);
+
+    const oldSynapses = await this.synapseRepo.getAll();
+    for (const s of oldSynapses) await this.synapseRepo.delete(s.id);
+
+    // 2. Insert new
+    for (const n of data.neurons) await this.neuronRepo.create(n);
+    for (const s of data.synapses) await this.synapseRepo.create(s);
+
+    // 3. Hydrate & Compile
+    this.neurons = await this.neuronRepo.getAll();
+    this.synapses = await this.synapseRepo.getAll();
+
+    console.log(`Compiling imported graph: ${this.neurons.length} neurons, ${this.synapses.length} synapses`);
+    const graph = this.translator.flatten(this.neurons, this.synapses);
+    this.gpu.prepareBuffers(graph.size, graph.weights, graph.biases, this.gpu.batchSize);
+    this.gpu.prepareTrainingBuffers(new Float32Array(graph.size * this.gpu.batchSize), 0.1);
+
+    return this.getGraphData();
+  }
+
+  async injectInput(data: Float32Array) {
+    // Map data to input neurons
+    // In simulation, we assume first N neurons are inputs.
+    // Or we just overwrite the first N values of the input buffer.
+    await this.gpu.injectInput(data);
+  }
 }

 // Keep a standalone init for backward compatibility or simple scripts if needed
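`exportGraph` and `importGraph` form a JSON round trip: export returns a plain `{ version, neurons, synapses }` object, and import clears both repositories, re-inserts the payload, then recompiles the GPU buffers. A hedged persistence sketch, assuming an already-constructed `NeuralEngine` instance and browser `localStorage`; only `exportGraph()`, `importGraph()`, and `getGraphData().nodeCount` are taken from this diff:

```ts
// Sketch: saving and restoring the graph with the new export/import API.
import { NeuralEngine } from "@buley/neural";

function saveGraph(engine: NeuralEngine) {
  localStorage.setItem("neural-graph", JSON.stringify(engine.exportGraph()));
}

async function restoreGraph(engine: NeuralEngine) {
  const raw = localStorage.getItem("neural-graph");
  if (!raw) return;
  // Clears repositories, re-inserts, then recompiles GPU buffers.
  const graph = await engine.importGraph(JSON.parse(raw));
  console.log(`Restored ${graph.nodeCount} nodes`);
}
```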
package/vite.config.ts
CHANGED

@@ -10,4 +10,13 @@ export default defineConfig({
   optimizeDeps: {
     exclude: ['@buley/dash', '@sqlite.org/sqlite-wasm'],
   },
+  build: {
+    lib: {
+      entry: './src/index.ts',
+      formats: ['es'],
+    },
+    rollupOptions: {
+      external: ['@buley/dash', '@webgpu/types'],
+    }
+  }
 });
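The new `build.lib` block produces an ES-module library build from `./src/index.ts`, with `@buley/dash` and `@webgpu/types` marked external rather than bundled. A consumer-side sketch (an assumption, not part of this package): a Vite app using the library may want the same `optimizeDeps` exclusion this config already uses for its own dev server, since `@buley/dash` stays external.

```ts
// Hypothetical consumer vite.config.ts. Mirrors the optimizeDeps exclusion
// above; whether a given app needs it depends on how @buley/dash loads its
// sqlite-wasm assets, which this diff does not show.
import { defineConfig } from 'vite';

export default defineConfig({
  optimizeDeps: {
    exclude: ['@buley/dash', '@sqlite.org/sqlite-wasm'],
  },
});
```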