@affectively/neural 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/README.md +32 -0
- package/dist/backend/factory.d.ts +7 -0
- package/dist/backend/factory.js +37 -0
- package/dist/backend/types.d.ts +33 -0
- package/dist/backend/types.js +1 -0
- package/dist/backend/webgpu/index.d.ts +9 -0
- package/dist/backend/webgpu/index.js +37 -0
- package/dist/backend/webnn/index.d.ts +14 -0
- package/dist/backend/webnn/index.js +39 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.js +12 -0
- package/package.json +20 -0
- package/src/backend/factory.ts +43 -0
- package/src/backend/types.ts +39 -0
- package/src/backend/webgpu/index.ts +46 -0
- package/src/backend/webnn/index.ts +55 -0
- package/src/index.ts +14 -0
- package/tsconfig.json +16 -0
- package/verify.ts +22 -0
package/CHANGELOG.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
## [4.1.0] - 2026-01-23
|
|
4
|
+
|
|
5
|
+
- Bumped version to align with Dash 4.1.0.
|
|
6
|
+
|
|
7
|
+
## [4.0.0] - 2026-01-23
|
|
8
|
+
|
|
9
|
+
- **Sync Versioning**: Aligned with Dash 4.0.0 release.
|
|
10
|
+
|
|
11
|
+
## [3.0.0] - 2026-01-23
|
|
12
|
+
|
|
13
|
+
### Added
|
|
14
|
+
|
|
15
|
+
- **Initial Release**: Scaffolded `@affectively/neural` package.
|
|
16
|
+
- **Hybrid Backend**: Implemented dual-backend architecture.
|
|
17
|
+
- **WebNN**: Experimental support for NPU acceleration (`WebNNBackend`).
|
|
18
|
+
- **WebGPU**: Baseline support for GPU acceleration (`WebGPUBackend`).
|
|
19
|
+
- **Factory**: Added `getBackend()` with auto-selection logic (WebNN -> WebGPU).
|
package/README.md
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# @affectively/neural (v5.0.0)
|
|
2
|
+
|
|
3
|
+
**The "Transparent Brain" Neural Engine.**
|
|
4
|
+
|
|
5
|
+
`@affectively/neural` is a high-performance, local-first neural graph engine designed to run at native speeds in the browser. It features a hybrid backend architecture that automatically leverages the best available hardware acceleration.
|
|
6
|
+
|
|
7
|
+
## Features
|
|
8
|
+
|
|
9
|
+
- **Hybrid Compute Engine**:
|
|
10
|
+
- **WebNN (NPU)**: Experimental support for direct Neural Processing Unit access. Use for maximum efficiency and battery life.
|
|
11
|
+
- **WebGPU (GPU)**: Baseline high-performance parallel compute using `navigator.gpu`.
|
|
12
|
+
- **Auto-Selection**: Automatically detects `navigator.ml` and falls back to WebGPU if unavailable.
|
|
13
|
+
- **Zero-Copy Architecture** (Planned): Shared memory buffers between storage (`@affectively/dash`) and compute.
|
|
14
|
+
|
|
15
|
+
## Usage
|
|
16
|
+
|
|
17
|
+
```typescript
|
|
18
|
+
import { NeuralEngine } from "@affectively/neural";
|
|
19
|
+
|
|
20
|
+
// Automatically initializes the best backend (WebNN -> WebGPU)
|
|
21
|
+
const engine = await NeuralEngine.create();
|
|
22
|
+
|
|
23
|
+
// Create a tensor
|
|
24
|
+
const data = new Float32Array([1, 2, 3, 4]);
|
|
25
|
+
const tensor = await engine.createTensor(data, [2, 2]);
|
|
26
|
+
|
|
27
|
+
console.log("Engine running on:", engine.name);
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Architecture
|
|
31
|
+
|
|
32
|
+
This package is part of the Neural ecosystem. It handles the _Execution_ layer, while `@affectively/dash` handles the _Representation_ (persistence) layer.
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
// Compiled output (tsc) of src/backend/factory.ts — edit the source, not this file.
import { WebGPUBackend } from './webgpu';
import { WebNNBackend } from './webnn';
// Transpiled form of the TypeScript string enum `BackendType`.
export var BackendType;
(function (BackendType) {
    BackendType["WebGPU"] = "WebGPU";
    BackendType["WebNN"] = "WebNN";
    BackendType["Auto"] = "Auto";
})(BackendType || (BackendType = {}));
/**
 * Selects and initializes a compute backend.
 * Explicit WebNN/WebGPU requests initialize that backend directly (and throw
 * if unsupported); Auto prefers WebNN and falls back to WebGPU on failure.
 */
export async function getBackend(type = BackendType.Auto) {
    const webnn = new WebNNBackend();
    const webgpu = new WebGPUBackend();
    if (type === BackendType.WebNN) {
        await webnn.initialize();
        return webnn;
    }
    if (type === BackendType.WebGPU) {
        await webgpu.initialize();
        return webgpu;
    }
    // Auto Selection Strategy:
    // 1. Try WebNN (NPU) first for efficiency/speed
    // 2. Fallback to WebGPU
    // 3. (Future) Fallback to WASM
    if (await webnn.isSupported()) {
        try {
            console.log('Neural 3.0: Attempting WebNN initialization...');
            await webnn.initialize();
            return webnn;
        }
        catch (err) {
            // WebNN advertised support but failed at runtime — fall through to WebGPU.
            console.warn('Neural 3.0: WebNN failed to initialize, falling back to WebGPU', err);
        }
    }
    console.log('Neural 3.0: initializing WebGPU...');
    await webgpu.initialize();
    return webgpu;
}
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
// Generated declarations (tsc) for src/backend/types.ts — edit the source, not this file.
/**
 * Represents a tensor on the compute device.
 */
export interface Tensor {
    /** Raw data (if downloaded to CPU) */
    data?: Float32Array;
    /** Shape of the tensor */
    shape: number[];
    /**
     * The underlying buffer on the device.
     * - WebGPU: GPUBuffer
     * - WebNN: MLOperand
     */
    buffer?: any;
}
/**
 * Abstract interface for a compute backend (WebGPU or WebNN).
 */
export interface ComputeBackend {
    /** Human-readable backend identifier (e.g. 'WebGPU', 'WebNN'). */
    name: string;
    /**
     * Checks if this backend is supported in the current environment.
     */
    isSupported(): Promise<boolean>;
    /**
     * Initializes the backend context.
     */
    initialize(): Promise<void>;
    /**
     * Creates a tensor from the given data.
     */
    createTensor(data: Float32Array, shape: number[]): Promise<Tensor>;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Compiled output of src/backend/types.ts — types erase at compile time, so
// only an empty module marker remains.
export {};
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
// Generated declarations (tsc) for src/backend/webgpu/index.ts — edit the source, not this file.
import { ComputeBackend, Tensor } from '../types';
export declare class WebGPUBackend implements ComputeBackend {
    name: string;
    private adapter;
    private device;
    isSupported(): Promise<boolean>;
    initialize(): Promise<void>;
    createTensor(data: Float32Array, shape: number[]): Promise<Tensor>;
}
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
// Compiled output (tsc) of src/backend/webgpu/index.ts — edit the source, not this file.
export class WebGPUBackend {
    name = 'WebGPU';
    adapter = null;
    device = null;
    // True when `navigator.gpu` is exposed in this environment.
    async isSupported() {
        return typeof navigator !== 'undefined' && 'gpu' in navigator;
    }
    // Requests an adapter and device; throws when WebGPU is unavailable.
    async initialize() {
        if (!await this.isSupported()) {
            throw new Error('WebGPU is not supported');
        }
        this.adapter = await navigator.gpu.requestAdapter();
        if (!this.adapter) {
            throw new Error('Failed to request WebGPU adapter');
        }
        this.device = await this.adapter.requestDevice();
        // NOTE(review): assumes `GPUAdapter.info` exists; older implementations
        // exposed requestAdapterInfo() instead — confirm target browsers.
        console.log(`WebGPU initialized: ${this.adapter.info.vendor} ${this.adapter.info.architecture}`);
    }
    // Uploads `data` into a new STORAGE buffer; throws before initialize().
    async createTensor(data, shape) {
        if (!this.device)
            throw new Error('WebGPUBackend not initialized');
        // Create a buffer for the tensor on the GPU
        const buffer = this.device.createBuffer({
            size: data.byteLength,
            usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
            mappedAtCreation: true
        });
        // Write data to the mapped buffer
        new Float32Array(buffer.getMappedRange()).set(data);
        buffer.unmap();
        return {
            data, // Keep a CPU copy for now for debugging
            shape,
            buffer
        };
    }
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
// Generated declarations (tsc) for src/backend/webnn/index.ts — edit the source, not this file.
import { ComputeBackend, Tensor } from '../types';
// Global augmentation: WebNN's `navigator.ml` is not in the default DOM libs.
declare global {
    interface Navigator {
        ml: any;
    }
}
export declare class WebNNBackend implements ComputeBackend {
    name: string;
    private context;
    private builder;
    isSupported(): Promise<boolean>;
    initialize(): Promise<void>;
    createTensor(data: Float32Array, shape: number[]): Promise<Tensor>;
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
// Compiled output for src/backend/webnn/index.ts — keep in sync with the source.
export class WebNNBackend {
    name = 'WebNN';
    context = null; // MLContext once initialize() succeeds
    builder = null; // MLGraphBuilder once initialize() succeeds
    // True when the WebNN API (`navigator.ml`) is exposed in this environment.
    async isSupported() {
        return typeof navigator !== 'undefined' && 'ml' in navigator;
    }
    // Creates the ML context and graph builder; throws when WebNN is
    // unavailable or context creation fails.
    async initialize() {
        if (!await this.isSupported()) {
            throw new Error('WebNN is not supported');
        }
        try {
            // 1. Create Context (Access to NPU/GPU/CPU)
            this.context = await navigator.ml.createContext();
            // 2. Create Graph Builder (Use this to construct the compute graph).
            // MLGraphBuilder lives on the global scope (Window AND dedicated
            // workers) — resolve via globalThis instead of `window`, which is
            // undefined in worker contexts and threw a ReferenceError here.
            const GraphBuilderCtor = globalThis.MLGraphBuilder;
            if (typeof GraphBuilderCtor !== 'function') {
                throw new Error('MLGraphBuilder is not available in this environment');
            }
            this.builder = new GraphBuilderCtor(this.context);
            console.log('WebNN: Context initialized successfully');
        }
        catch (err) {
            console.error('WebNN: Initialization failed', err);
            throw err;
        }
    }
    // Wraps CPU data as a Tensor; actual device binding happens at graph
    // execution time in WebNN. Throws before initialize().
    async createTensor(data, shape) {
        if (!this.context)
            throw new Error('WebNNBackend not initialized');
        // In WebNN, we often bind data at execution time rather than uploading strictly beforehand
        // But for the abstraction, we can treat constant data as inputs.
        // For now, return the CPU data, and we will bind it as an MLBuffer or Input/Constant during graph execution.
        // Note: WebNN currently deals in Operations.
        // Data is usually an input to the `compute` method.
        // We will store the Float32Array as the "buffer" for now.
        return {
            data,
            shape,
            buffer: data // Holding the reference for now
        };
    }
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
// Compiled entry point: surface the backend layer and the engine facade.
export * from './backend/types';
export * from './backend/factory';
export * from './backend/webgpu';
export * from './backend/webnn';
import { getBackend } from './backend/factory';
/**
 * High-level facade over the compute backends. Currently delegates straight
 * to the factory's auto-selection; graph-level APIs will be added here.
 */
export class NeuralEngine {
    // Initializes and returns the best available backend (WebNN -> WebGPU).
    static async create() {
        const backend = await getBackend();
        return backend;
    }
}
|
package/package.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@affectively/neural",
|
|
3
|
+
"version": "5.0.0",
|
|
4
|
+
"publishConfig": {
|
|
5
|
+
"access": "public"
|
|
6
|
+
},
|
|
7
|
+
"main": "dist/index.js",
|
|
8
|
+
"module": "dist/index.js",
|
|
9
|
+
"types": "dist/index.d.ts",
|
|
10
|
+
"scripts": {
|
|
11
|
+
"build": "tsc"
|
|
12
|
+
},
|
|
13
|
+
"peerDependencies": {
|
|
14
|
+
"typescript": "^5.0.0"
|
|
15
|
+
},
|
|
16
|
+
"devDependencies": {
|
|
17
|
+
"@webgpu/types": "^0.1.69",
|
|
18
|
+
"typescript": "^5.0.0"
|
|
19
|
+
}
|
|
20
|
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import { ComputeBackend } from './types';
|
|
2
|
+
import { WebGPUBackend } from './webgpu';
|
|
3
|
+
import { WebNNBackend } from './webnn';
|
|
4
|
+
|
|
5
|
+
export enum BackendType {
|
|
6
|
+
WebGPU = 'WebGPU',
|
|
7
|
+
WebNN = 'WebNN',
|
|
8
|
+
Auto = 'Auto'
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
export async function getBackend(type: BackendType = BackendType.Auto): Promise<ComputeBackend> {
|
|
12
|
+
const webnn = new WebNNBackend();
|
|
13
|
+
const webgpu = new WebGPUBackend();
|
|
14
|
+
|
|
15
|
+
if (type === BackendType.WebNN) {
|
|
16
|
+
await webnn.initialize();
|
|
17
|
+
return webnn;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
if (type === BackendType.WebGPU) {
|
|
21
|
+
await webgpu.initialize();
|
|
22
|
+
return webgpu;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
// Auto Selection Strategy:
|
|
26
|
+
// 1. Try WebNN (NPU) first for efficiency/speed
|
|
27
|
+
// 2. Fallback to WebGPU
|
|
28
|
+
// 3. (Future) Fallback to WASM
|
|
29
|
+
|
|
30
|
+
if (await webnn.isSupported()) {
|
|
31
|
+
try {
|
|
32
|
+
console.log('Neural 3.0: Attempting WebNN initialization...');
|
|
33
|
+
await webnn.initialize();
|
|
34
|
+
return webnn;
|
|
35
|
+
} catch (err) {
|
|
36
|
+
console.warn('Neural 3.0: WebNN failed to initialize, falling back to WebGPU', err);
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
console.log('Neural 3.0: initializing WebGPU...');
|
|
41
|
+
await webgpu.initialize();
|
|
42
|
+
return webgpu;
|
|
43
|
+
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
/**
 * Represents a tensor on the compute device.
 */
export interface Tensor {
  /** Raw data (if downloaded to CPU) */
  data?: Float32Array;
  /** Shape of the tensor */
  shape: number[];
  /**
   * The underlying buffer on the device.
   * - WebGPU: GPUBuffer
   * - WebNN: MLOperand
   * NOTE(review): left as `any` until the two device types are unified —
   * confirm before narrowing.
   */
  buffer?: any;
}

/**
 * Abstract interface for a compute backend (WebGPU or WebNN).
 * Implementations must tolerate isSupported() being called before initialize().
 */
export interface ComputeBackend {
  /** Human-readable backend identifier (e.g. 'WebGPU', 'WebNN'). */
  name: string;

  /**
   * Checks if this backend is supported in the current environment.
   */
  isSupported(): Promise<boolean>;

  /**
   * Initializes the backend context.
   */
  initialize(): Promise<void>;

  /**
   * Creates a tensor from the given data.
   */
  createTensor(data: Float32Array, shape: number[]): Promise<Tensor>;

  // Future operators will be added here (matmul, add, etc.)
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import { ComputeBackend, Tensor } from '../types';
|
|
2
|
+
|
|
3
|
+
export class WebGPUBackend implements ComputeBackend {
|
|
4
|
+
name = 'WebGPU';
|
|
5
|
+
private adapter: GPUAdapter | null = null;
|
|
6
|
+
private device: GPUDevice | null = null;
|
|
7
|
+
|
|
8
|
+
async isSupported(): Promise<boolean> {
|
|
9
|
+
return typeof navigator !== 'undefined' && 'gpu' in navigator;
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
async initialize(): Promise<void> {
|
|
13
|
+
if (!await this.isSupported()) {
|
|
14
|
+
throw new Error('WebGPU is not supported');
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
this.adapter = await navigator.gpu.requestAdapter();
|
|
18
|
+
if (!this.adapter) {
|
|
19
|
+
throw new Error('Failed to request WebGPU adapter');
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
this.device = await this.adapter.requestDevice();
|
|
23
|
+
console.log(`WebGPU initialized: ${this.adapter.info.vendor} ${this.adapter.info.architecture}`);
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
async createTensor(data: Float32Array, shape: number[]): Promise<Tensor> {
|
|
27
|
+
if (!this.device) throw new Error('WebGPUBackend not initialized');
|
|
28
|
+
|
|
29
|
+
// Create a buffer for the tensor on the GPU
|
|
30
|
+
const buffer = this.device.createBuffer({
|
|
31
|
+
size: data.byteLength,
|
|
32
|
+
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
|
|
33
|
+
mappedAtCreation: true
|
|
34
|
+
});
|
|
35
|
+
|
|
36
|
+
// Write data to the mapped buffer
|
|
37
|
+
new Float32Array(buffer.getMappedRange()).set(data);
|
|
38
|
+
buffer.unmap();
|
|
39
|
+
|
|
40
|
+
return {
|
|
41
|
+
data, // Keep a CPU copy for now for debugging
|
|
42
|
+
shape,
|
|
43
|
+
buffer
|
|
44
|
+
};
|
|
45
|
+
}
|
|
46
|
+
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { ComputeBackend, Tensor } from '../types';
|
|
2
|
+
|
|
3
|
+
// Augment the navigator interface for WebNN (types might be missing in default libs)
|
|
4
|
+
declare global {
|
|
5
|
+
interface Navigator {
|
|
6
|
+
ml: any;
|
|
7
|
+
}
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
export class WebNNBackend implements ComputeBackend {
|
|
11
|
+
name = 'WebNN';
|
|
12
|
+
private context: any = null;
|
|
13
|
+
private builder: any = null;
|
|
14
|
+
|
|
15
|
+
async isSupported(): Promise<boolean> {
|
|
16
|
+
return typeof navigator !== 'undefined' && 'ml' in navigator;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
async initialize(): Promise<void> {
|
|
20
|
+
if (!await this.isSupported()) {
|
|
21
|
+
throw new Error('WebNN is not supported');
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
try {
|
|
25
|
+
// 1. Create Context (Access to NPU/GPU/CPU)
|
|
26
|
+
this.context = await navigator.ml.createContext();
|
|
27
|
+
|
|
28
|
+
// 2. Create Graph Builder (Use this to construct the compute graph)
|
|
29
|
+
this.builder = new (window as any).MLGraphBuilder(this.context);
|
|
30
|
+
|
|
31
|
+
console.log('WebNN: Context initialized successfully');
|
|
32
|
+
} catch (err) {
|
|
33
|
+
console.error('WebNN: Initialization failed', err);
|
|
34
|
+
throw err;
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
async createTensor(data: Float32Array, shape: number[]): Promise<Tensor> {
|
|
39
|
+
if (!this.context) throw new Error('WebNNBackend not initialized');
|
|
40
|
+
|
|
41
|
+
// In WebNN, we often bind data at execution time rather than uploading strictly beforehand
|
|
42
|
+
// But for the abstraction, we can treat constant data as inputs.
|
|
43
|
+
// For now, return the CPU data, and we will bind it as an MLBuffer or Input/Constant during graph execution.
|
|
44
|
+
|
|
45
|
+
// Note: WebNN currently deals in Operations.
|
|
46
|
+
// Data is usually an input to the `compute` method.
|
|
47
|
+
// We will store the Float32Array as the "buffer" for now.
|
|
48
|
+
|
|
49
|
+
return {
|
|
50
|
+
data,
|
|
51
|
+
shape,
|
|
52
|
+
buffer: data // Holding the reference for now
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
export * from './backend/types';
|
|
2
|
+
export * from './backend/factory';
|
|
3
|
+
export * from './backend/webgpu';
|
|
4
|
+
export * from './backend/webnn';
|
|
5
|
+
|
|
6
|
+
// Re-export specific classes if needed for direct access
|
|
7
|
+
import { getBackend } from './backend/factory';
|
|
8
|
+
|
|
9
|
+
export class NeuralEngine {
|
|
10
|
+
// Placeholder for the high-level engine that uses the backend
|
|
11
|
+
static async create() {
|
|
12
|
+
return getBackend();
|
|
13
|
+
}
|
|
14
|
+
}
|
package/tsconfig.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ESNext",
|
|
4
|
+
"module": "ESNext",
|
|
5
|
+
"moduleResolution": "bundler",
|
|
6
|
+
"declaration": true,
|
|
7
|
+
"outDir": "dist",
|
|
8
|
+
"strict": true,
|
|
9
|
+
"esModuleInterop": true,
|
|
10
|
+
"skipLibCheck": true,
|
|
11
|
+
"forceConsistentCasingInFileNames": true,
|
|
12
|
+
"lib": ["ESNext", "DOM"],
|
|
13
|
+
"types": ["@webgpu/types"]
|
|
14
|
+
},
|
|
15
|
+
"include": ["src/**/*"]
|
|
16
|
+
}
|
package/verify.ts
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { getBackend, BackendType } from './src';
|
|
2
|
+
|
|
3
|
+
async function main() {
|
|
4
|
+
console.log('--- Neural 3.0 Verification ---');
|
|
5
|
+
|
|
6
|
+
// Mock navigator for Node environment testing if needed,
|
|
7
|
+
// but strictly this is meant for browser.
|
|
8
|
+
// We can just check if it compiles for now.
|
|
9
|
+
|
|
10
|
+
try {
|
|
11
|
+
const backend = await getBackend(BackendType.Auto);
|
|
12
|
+
console.log(`Success: Initialized ${backend.name} backend.`);
|
|
13
|
+
} catch (err) {
|
|
14
|
+
if (typeof navigator === 'undefined') {
|
|
15
|
+
console.log('Running in Node - skipping browser-only initialization.');
|
|
16
|
+
} else {
|
|
17
|
+
console.error('Initialization failed:', err);
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
main();
|