@digitaldefiance/node-accelerate 1.0.7 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1271 -165
- package/accelerate.cc +1346 -0
- package/examples/advanced-functions.js +0 -0
- package/examples/data-processing.js +123 -0
- package/examples/mathematical-functions.js +101 -0
- package/examples/ml-pipeline.js +317 -0
- package/examples/signal-processing-advanced.js +98 -0
- package/examples/statistical-operations.js +44 -0
- package/examples/trigonometric-functions.js +52 -0
- package/index.d.ts +720 -0
- package/index.js +636 -0
- package/package.json +12 -3
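
For orientation, a minimal quick-start sketch of how the 2.0.0 API is used; the require path is the published package name, the functions (vadd, dot, mean) are the ones the bundled examples call, and the expected outputs assume the usual element-wise/reduction semantics, which this diff does not itself document.

// Editor's sketch (not part of the package): quick-start, assuming the published
// entry point (package/index.js) exports the same functions the examples call via require('..').
const accelerate = require('@digitaldefiance/node-accelerate');

const a = new Float64Array([1, 2, 3, 4]);
const b = new Float64Array([5, 6, 7, 8]);
const out = new Float64Array(4);

accelerate.vadd(a, b, out);          // assumed element-wise addition
console.log(Array.from(out));        // expected: [6, 8, 10, 12]
console.log(accelerate.dot(a, b));   // assumed dot product: 70
console.log(accelerate.mean(a));     // assumed arithmetic mean: 2.5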

package/examples/advanced-functions.js: file without changes

package/examples/data-processing.js
@@ -0,0 +1,123 @@
/**
 * Data Processing Examples
 * Demonstrates clipping, thresholding, interpolation, and matrix operations
 */

const accelerate = require('..');

console.log('=== Data Processing ===\n');

// 1. Clipping
console.log('--- Clipping ---');
const data = new Float64Array(10);
for (let i = 0; i < 10; i++) {
  data[i] = (i - 5) * 10; // -50 to 40
}

const clipped = new Float64Array(10);
accelerate.vclip(data, clipped, -20, 20);

console.log('Original:', Array.from(data));
console.log('Clipped [-20, 20]:', Array.from(clipped));

// 2. Thresholding
console.log('\n--- Thresholding ---');
const signal = new Float64Array(10);
for (let i = 0; i < 10; i++) {
  signal[i] = Math.random() * 100;
}

const thresholded = new Float64Array(10);
accelerate.vthreshold(signal, thresholded, 50);

console.log('Original:', Array.from(signal).map(x => x.toFixed(1)));
console.log('Threshold > 50:', Array.from(thresholded).map(x => x.toFixed(1)));

// 3. Vector Reversal
console.log('\n--- Vector Reversal ---');
const sequence = new Float64Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const reversed = new Float64Array(10);

accelerate.vreverse(sequence, reversed);

console.log('Original:', Array.from(sequence));
console.log('Reversed:', Array.from(reversed));

// 4. Matrix Transpose
console.log('\n--- Matrix Transpose ---');
const rows = 3, cols = 4;
const matrix = new Float64Array(rows * cols);

// Fill matrix
for (let i = 0; i < rows; i++) {
  for (let j = 0; j < cols; j++) {
    matrix[i * cols + j] = i * cols + j + 1;
  }
}

const transposed = new Float64Array(rows * cols);
accelerate.transpose(matrix, transposed, rows, cols);

console.log('Original matrix (3×4):');
for (let i = 0; i < rows; i++) {
  const row = [];
  for (let j = 0; j < cols; j++) {
    row.push(matrix[i * cols + j].toFixed(0).padStart(3));
  }
  console.log('  [' + row.join(' ') + ']');
}

console.log('Transposed matrix (4×3):');
for (let i = 0; i < cols; i++) {
  const row = [];
  for (let j = 0; j < rows; j++) {
    row.push(transposed[i * rows + j].toFixed(0).padStart(3));
  }
  console.log('  [' + row.join(' ') + ']');
}

// 5. Linear Interpolation
console.log('\n--- Linear Interpolation ---');
const xData = new Float64Array([0, 1, 2, 3, 4]);
const yData = new Float64Array([0, 1, 4, 9, 16]); // y = x²

const xiData = new Float64Array([0.5, 1.5, 2.5, 3.5]);
const yiData = new Float64Array(4);

accelerate.interp1d(xData, yData, xiData, yiData);

console.log('Known points (x, y):');
for (let i = 0; i < xData.length; i++) {
  console.log(`  (${xData[i]}, ${yData[i]})`);
}

console.log('Interpolated points:');
for (let i = 0; i < xiData.length; i++) {
  console.log(`  x=${xiData[i]} → y=${yiData[i].toFixed(2)}`);
}

// 6. Data Normalization Pipeline
console.log('\n--- Data Normalization Pipeline ---');
const rawData = new Float64Array(100);
for (let i = 0; i < 100; i++) {
  rawData[i] = Math.random() * 200 - 100; // -100 to 100
}

// Step 1: Clip outliers
const clippedData = new Float64Array(100);
accelerate.vclip(rawData, clippedData, -50, 50);

// Step 2: Take absolute value
const absData = new Float64Array(100);
accelerate.vabs(clippedData, absData);

// Step 3: Normalize to [0, 1]
const maxVal = accelerate.max(absData);
const normalized = new Float64Array(100);
accelerate.vscale(absData, 1.0 / maxVal, normalized);

console.log('Pipeline statistics:');
console.log('  Raw data range:', [accelerate.min(rawData).toFixed(2), accelerate.max(rawData).toFixed(2)]);
console.log('  After clipping:', [accelerate.min(clippedData).toFixed(2), accelerate.max(clippedData).toFixed(2)]);
console.log('  After abs:', [accelerate.min(absData).toFixed(2), accelerate.max(absData).toFixed(2)]);
console.log('  After normalization:', [accelerate.min(normalized).toFixed(2), accelerate.max(normalized).toFixed(2)]);
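
As a reading aid for the normalization pipeline above, here is a plain-JavaScript reference of the same clip → abs → scale-by-1/max sequence; normalizePipelineRef is an editor-invented helper, and the semantics of vclip, vabs, and vscale are inferred from the example's own comments rather than documented here.

// Editor's sketch (not part of the package): pure-JS reference for the pipeline above,
// handy for spot-checking the native results on small inputs.
function normalizePipelineRef(values, lo = -50, hi = 50) {
  const clipped = values.map(v => Math.min(hi, Math.max(lo, v))); // clip outliers to [lo, hi]
  const magnitudes = clipped.map(v => Math.abs(v));               // absolute value
  const peak = Math.max(...magnitudes);                           // largest magnitude
  return magnitudes.map(v => v / peak);                           // scale into [0, 1]
}

console.log(normalizePipelineRef([-80, -20, 0, 30, 90])); // [1, 0.4, 0, 0.6, 1]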

package/examples/mathematical-functions.js
@@ -0,0 +1,101 @@
/**
 * Mathematical Functions Examples
 * Demonstrates exp, log, power, and other math operations
 */

const accelerate = require('..');

console.log('=== Mathematical Functions ===\n');

const n = 1000;

// 1. Exponential and Logarithm
console.log('--- Exponential and Logarithm ---');
const x = new Float64Array(n);
const expX = new Float64Array(n);
const logExpX = new Float64Array(n);

for (let i = 0; i < n; i++) {
  x[i] = i / 100; // 0 to 9.99
}

accelerate.vexp(x, expX);
accelerate.vlog(expX, logExpX);

// Verify log(exp(x)) = x
let maxError = 0;
for (let i = 0; i < n; i++) {
  const error = Math.abs(x[i] - logExpX[i]);
  if (error > maxError) maxError = error;
}

console.log('log(exp(x)) = x verification');
console.log('Max error:', maxError.toFixed(10));

// 2. Power functions
console.log('\n--- Power Functions ---');
const base = new Float64Array(100);
const exponent = new Float64Array(100);
const result = new Float64Array(100);

for (let i = 0; i < 100; i++) {
  base[i] = i / 10 + 1; // 1 to 10.9
  exponent[i] = 2; // Square everything
}

accelerate.vpow(base, exponent, result);

console.log('First 5 values squared:');
for (let i = 0; i < 5; i++) {
  console.log(`  ${base[i].toFixed(1)}^2 = ${result[i].toFixed(2)}`);
}

// 3. Square root verification
console.log('\n--- Square Root ---');
const squares = new Float64Array(100);
const sqrtResult = new Float64Array(100);

for (let i = 0; i < 100; i++) {
  squares[i] = i * i;
}

accelerate.vsqrt(squares, sqrtResult);

console.log('sqrt(x²) = x verification:');
console.log('  sqrt(25) =', sqrtResult[5].toFixed(4));
console.log('  sqrt(100) =', sqrtResult[10].toFixed(4));
console.log('  sqrt(9801) =', sqrtResult[99].toFixed(4));

// 4. Log base 10
console.log('\n--- Logarithm Base 10 ---');
const powers10 = new Float64Array([1, 10, 100, 1000, 10000]);
const log10Result = new Float64Array(5);

accelerate.vlog10(powers10, log10Result);

console.log('log10 of powers of 10:');
for (let i = 0; i < 5; i++) {
  console.log(`  log10(${powers10[i]}) = ${log10Result[i].toFixed(4)}`);
}

// 5. Absolute value
console.log('\n--- Absolute Value ---');
const mixed = new Float64Array([-5, -3, -1, 0, 1, 3, 5]);
const absResult = new Float64Array(7);

accelerate.vabs(mixed, absResult);

console.log('Absolute values:');
console.log('  Input:', Array.from(mixed));
console.log('  Output:', Array.from(absResult));

// 6. Negation
console.log('\n--- Negation ---');
const positive = new Float64Array([1, 2, 3, 4, 5]);
const negative = new Float64Array(5);

accelerate.vneg(positive, negative);

console.log('Negation:');
console.log('  Input:', Array.from(positive));
console.log('  Output:', Array.from(negative));
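
A small cross-check one might run alongside the example above, comparing the vectorized results against Math.exp and Math.pow on the same inputs; the require path and the element-wise semantics of vexp and vpow are assumptions based on how the examples call them, not a documented contract.

// Editor's sketch (not part of the package): spot-check vexp and vpow against plain Math.
const accelerate = require('@digitaldefiance/node-accelerate'); // assumed entry point

const xs = new Float64Array([0, 0.5, 1, 2, 5]);
const ys = new Float64Array(xs.length);
accelerate.vexp(xs, ys); // assumed: ys[i] = e^xs[i]

xs.forEach((v, i) => {
  const diff = Math.abs(ys[i] - Math.exp(v));
  console.log(`exp(${v}): accelerate=${ys[i]}, Math=${Math.exp(v)}, |diff|=${diff}`);
});

const bases = new Float64Array([2, 3, 4]);
const exps = new Float64Array([10, 3, 0.5]);
const pows = new Float64Array(3);
accelerate.vpow(bases, exps, pows); // assumed: pows[i] = bases[i] ** exps[i]
console.log(Array.from(pows));      // expected ≈ [1024, 27, 2]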

package/examples/ml-pipeline.js
@@ -0,0 +1,317 @@
/**
 * Complete Machine Learning Pipeline Example
 * Demonstrates data preprocessing, neural network inference, and evaluation
 */

const accelerate = require('..');

console.log('=== Machine Learning Pipeline ===\n');

// ============================================================================
// 1. DATA PREPROCESSING
// ============================================================================

console.log('--- Step 1: Data Preprocessing ---');

// Generate synthetic dataset
const numSamples = 1000;
const numFeatures = 10;
const rawData = new Float64Array(numSamples * numFeatures);

for (let i = 0; i < rawData.length; i++) {
  rawData[i] = Math.random() * 200 - 100; // Range: -100 to 100
}

console.log('Raw data shape:', [numSamples, numFeatures]);
console.log('Raw data range:', [
  accelerate.min(rawData).toFixed(2),
  accelerate.max(rawData).toFixed(2)
]);

// Step 1a: Clip outliers
const clipped = new Float64Array(rawData.length);
accelerate.vclip(rawData, clipped, -50, 50);

// Step 1b: Standardization (z-score normalization)
function standardize(data, output) {
  const mean = accelerate.mean(data);
  const std = accelerate.stddev(data);

  for (let i = 0; i < data.length; i++) {
    output[i] = (data[i] - mean) / std;
  }

  return output;
}

const normalized = new Float64Array(clipped.length);
standardize(clipped, normalized);

console.log('After preprocessing:');
console.log('  Mean:', accelerate.mean(normalized).toFixed(6));
console.log('  Std Dev:', accelerate.stddev(normalized).toFixed(6));
console.log('  Range:', [
  accelerate.min(normalized).toFixed(2),
  accelerate.max(normalized).toFixed(2)
]);

// ============================================================================
// 2. NEURAL NETWORK INFERENCE
// ============================================================================

console.log('\n--- Step 2: Neural Network Inference ---');

// Network architecture: 10 -> 64 -> 32 -> 3 (classification)
const layer1Weights = new Float64Array(10 * 64);
const layer1Bias = new Float64Array(64);
const layer2Weights = new Float64Array(64 * 32);
const layer2Bias = new Float64Array(32);
const layer3Weights = new Float64Array(32 * 3);
const layer3Bias = new Float64Array(3);

// Initialize with random weights (Xavier initialization)
function initializeWeights(weights, fanIn, fanOut) {
  const limit = Math.sqrt(6.0 / (fanIn + fanOut));
  for (let i = 0; i < weights.length; i++) {
    weights[i] = (Math.random() * 2 - 1) * limit;
  }
}

initializeWeights(layer1Weights, 10, 64);
initializeWeights(layer2Weights, 64, 32);
initializeWeights(layer3Weights, 32, 3);

// ReLU activation
function relu(input, output) {
  accelerate.vclip(input, output, 0, Infinity);
  return output;
}

// Softmax activation
function softmax(logits, output) {
  // Subtract max for numerical stability
  const maxVal = accelerate.max(logits);
  const shifted = new Float64Array(logits.length);

  for (let i = 0; i < logits.length; i++) {
    shifted[i] = logits[i] - maxVal;
  }

  // Compute exp
  accelerate.vexp(shifted, output);

  // Normalize
  const sum = accelerate.sum(output);
  accelerate.vscale(output, 1.0 / sum, output);

  return output;
}

// Forward pass for one sample
function forward(input) {
  // Layer 1: input (10) -> hidden1 (64)
  const hidden1 = new Float64Array(64);
  accelerate.matvec(layer1Weights, input, hidden1, 64, 10);
  accelerate.vadd(hidden1, layer1Bias, hidden1);
  relu(hidden1, hidden1);

  // Layer 2: hidden1 (64) -> hidden2 (32)
  const hidden2 = new Float64Array(32);
  accelerate.matvec(layer2Weights, hidden1, hidden2, 32, 64);
  accelerate.vadd(hidden2, layer2Bias, hidden2);
  relu(hidden2, hidden2);

  // Layer 3: hidden2 (32) -> output (3)
  const logits = new Float64Array(3);
  accelerate.matvec(layer3Weights, hidden2, logits, 3, 32);
  accelerate.vadd(logits, layer3Bias, logits);

  // Softmax
  const probs = new Float64Array(3);
  softmax(logits, probs);

  return probs;
}

// Run inference on first sample
const sampleInput = normalized.subarray(0, 10);
const predictions = forward(sampleInput);

console.log('Network architecture: 10 -> 64 -> 32 -> 3');
console.log('Sample prediction:', Array.from(predictions).map(x => x.toFixed(4)));
console.log('Predicted class:', predictions.indexOf(accelerate.max(predictions)));

// Batch inference
console.log('\nRunning batch inference...');
console.time('Batch inference (1000 samples)');

const allPredictions = [];
for (let i = 0; i < numSamples; i++) {
  const input = normalized.subarray(i * numFeatures, (i + 1) * numFeatures);
  const pred = forward(input);
  allPredictions.push(Array.from(pred));
}

console.timeEnd('Batch inference (1000 samples)');

// ============================================================================
// 3. FEATURE ENGINEERING
// ============================================================================

console.log('\n--- Step 3: Feature Engineering ---');

// Compute polynomial features (x^2)
const squaredFeatures = new Float64Array(normalized.length);
accelerate.vsquare(normalized, squaredFeatures);

// Compute interaction features (feature 0 × feature 1 for each sample).
// The data is stored sample-major ([sample][feature]), so the two feature
// columns are extracted with a stride of numFeatures.
const feature1 = new Float64Array(numSamples);
const feature2 = new Float64Array(numSamples);
for (let i = 0; i < numSamples; i++) {
  feature1[i] = normalized[i * numFeatures];
  feature2[i] = normalized[i * numFeatures + 1];
}
const interaction = new Float64Array(numSamples);
accelerate.vmul(feature1, feature2, interaction);

console.log('Original features:', numFeatures);
console.log('Added squared features:', numFeatures);
console.log('Added interaction features: 1');
console.log('Total features:', numFeatures * 2 + 1);

// ============================================================================
// 4. DISTANCE-BASED METHODS
// ============================================================================

console.log('\n--- Step 4: Distance Computations ---');

// K-nearest neighbors: find distance to first sample
const query = normalized.subarray(0, numFeatures);
const distances = [];

for (let i = 1; i < Math.min(100, numSamples); i++) {
  const sample = normalized.subarray(i * numFeatures, (i + 1) * numFeatures);
  const dist = accelerate.euclidean(query, sample);
  distances.push({ index: i, distance: dist });
}

// Sort by distance
distances.sort((a, b) => a.distance - b.distance);

console.log('K-Nearest Neighbors (k=5):');
for (let i = 0; i < 5; i++) {
  console.log(`  Neighbor ${i + 1}: sample ${distances[i].index}, distance ${distances[i].distance.toFixed(4)}`);
}

// ============================================================================
// 5. DIMENSIONALITY REDUCTION (PCA-like projection)
// ============================================================================

console.log('\n--- Step 5: Dimensionality Reduction ---');

// Simple random projection (approximates PCA).
// The projection matrix is 2 × numFeatures, stored row-major so it matches the
// matvec(matrix, vec, out, rows, cols) call below: one row per output dimension.
const projectionMatrix = new Float64Array(2 * numFeatures); // Project to 2D
initializeWeights(projectionMatrix, numFeatures, 2);

// Normalize each projection row (each output direction) to unit length
for (let row = 0; row < 2; row++) {
  const rowVec = new Float64Array(numFeatures);
  for (let col = 0; col < numFeatures; col++) {
    rowVec[col] = projectionMatrix[row * numFeatures + col];
  }

  const unitRow = new Float64Array(numFeatures);
  accelerate.normalize(rowVec, unitRow);

  for (let col = 0; col < numFeatures; col++) {
    projectionMatrix[row * numFeatures + col] = unitRow[col];
  }
}

// Project first sample
const projected = new Float64Array(2);
accelerate.matvec(projectionMatrix, sampleInput, projected, 2, numFeatures);

console.log('Original dimensions:', numFeatures);
console.log('Reduced dimensions: 2');
console.log('Sample projection:', Array.from(projected).map(x => x.toFixed(4)));

// ============================================================================
// 6. SIGNAL PROCESSING FOR TIME SERIES
// ============================================================================

console.log('\n--- Step 6: Time Series Analysis ---');

// Generate synthetic time series
const timeSeriesLength = 1024;
const timeSeries = new Float64Array(timeSeriesLength);

for (let i = 0; i < timeSeriesLength; i++) {
  // Mix of frequencies
  timeSeries[i] =
    Math.sin(2 * Math.PI * 5 * i / timeSeriesLength) +
    0.5 * Math.sin(2 * Math.PI * 10 * i / timeSeriesLength) +
    0.3 * Math.sin(2 * Math.PI * 20 * i / timeSeriesLength);
}

// Apply Hanning window
const window = accelerate.hanning(timeSeriesLength);
const windowed = new Float64Array(timeSeriesLength);
accelerate.vmul(timeSeries, window, windowed);

// Compute FFT
const spectrum = accelerate.fft(windowed);

// Find dominant frequencies
const magnitudes = new Float64Array(spectrum.real.length);
for (let i = 0; i < magnitudes.length; i++) {
  magnitudes[i] = Math.sqrt(spectrum.real[i]**2 + spectrum.imag[i]**2);
}

// Find peaks
const peaks = [];
for (let i = 1; i < magnitudes.length - 1; i++) {
  if (magnitudes[i] > magnitudes[i-1] && magnitudes[i] > magnitudes[i+1] && magnitudes[i] > 10) {
    peaks.push({ bin: i, magnitude: magnitudes[i] });
  }
}

peaks.sort((a, b) => b.magnitude - a.magnitude);

console.log('Time series length:', timeSeriesLength);
console.log('Dominant frequency bins:', peaks.slice(0, 3).map(p => p.bin));

// ============================================================================
// 7. PERFORMANCE SUMMARY
// ============================================================================

console.log('\n--- Performance Summary ---');

// Benchmark key operations
const benchSize = 10000;
const benchVec1 = new Float64Array(benchSize);
const benchVec2 = new Float64Array(benchSize);
const benchResult = new Float64Array(benchSize);

for (let i = 0; i < benchSize; i++) {
  benchVec1[i] = Math.random();
  benchVec2[i] = Math.random();
}

console.time('Vector addition (10k elements)');
for (let i = 0; i < 100; i++) {
  accelerate.vadd(benchVec1, benchVec2, benchResult);
}
console.timeEnd('Vector addition (10k elements)');

console.time('Dot product (10k elements)');
for (let i = 0; i < 100; i++) {
  accelerate.dot(benchVec1, benchVec2);
}
console.timeEnd('Dot product (10k elements)');

console.time('Statistical operations (10k elements)');
for (let i = 0; i < 100; i++) {
  accelerate.mean(benchVec1);
  accelerate.stddev(benchVec1);
  accelerate.minmax(benchVec1);
}
console.timeEnd('Statistical operations (10k elements)');

console.log('\n✓ ML Pipeline Complete!');
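
The forward pass above depends on a particular matrix layout for matvec; judging from the call sites (matvec(weights, input, output, rows, cols) with a rows-length output and a cols-length input), the weights appear to be stored row-major. A pure-JS reference under that assumption, useful for validating a single layer; matvecRef is an editor-invented helper, and the layout is an inference, not a documented guarantee.

// Editor's sketch (assumption): row-major matrix × vector, matching how the
// example calls accelerate.matvec(matrix, vec, out, rows, cols).
function matvecRef(matrix, vec, out, rows, cols) {
  for (let r = 0; r < rows; r++) {
    let acc = 0;
    for (let c = 0; c < cols; c++) {
      acc += matrix[r * cols + c] * vec[c]; // dot product of row r with the input
    }
    out[r] = acc;
  }
  return out;
}

// Usage: matvecRef(layer1Weights, sampleInput, new Float64Array(64), 64, 10)
// should agree with the accelerate.matvec call in forward() if the layout assumption holds.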

package/examples/signal-processing-advanced.js
@@ -0,0 +1,98 @@
/**
 * Advanced Signal Processing Examples
 * Demonstrates convolution, correlation, windowing, and FFT/IFFT
 */

const accelerate = require('..');

console.log('=== Advanced Signal Processing ===\n');

// 1. Convolution Example
console.log('--- Convolution ---');
const signal = new Float64Array([1, 2, 3, 4, 5, 6, 7, 8]);
const kernel = new Float64Array([0.25, 0.5, 0.25]); // Moving average
const convResult = new Float64Array(signal.length - kernel.length + 1);

accelerate.conv(signal, kernel, convResult);
console.log('Signal:', Array.from(signal));
console.log('Kernel (moving avg):', Array.from(kernel));
console.log('Convolution result:', Array.from(convResult).map(x => x.toFixed(2)));

// 2. Cross-correlation Example
console.log('\n--- Cross-Correlation ---');
const sig1 = new Float64Array(100);
const sig2 = new Float64Array(100);

// Create two similar signals with a delay
for (let i = 0; i < 100; i++) {
  sig1[i] = Math.sin(2 * Math.PI * i / 20);
  sig2[i] = i >= 10 ? Math.sin(2 * Math.PI * (i - 10) / 20) : 0;
}

const xcorrResult = new Float64Array(sig1.length + sig2.length - 1);
accelerate.xcorr(sig1, sig2, xcorrResult);

// Find peak (indicates delay)
const peakIdx = xcorrResult.indexOf(accelerate.max(xcorrResult));
console.log('Detected delay:', Math.abs(peakIdx - sig1.length + 1), 'samples');

// 3. Window Functions
console.log('\n--- Window Functions ---');
const windowSize = 64;

const hammingWin = accelerate.hamming(windowSize);
const hanningWin = accelerate.hanning(windowSize);
const blackmanWin = accelerate.blackman(windowSize);

console.log('Hamming window center value:', hammingWin[windowSize/2].toFixed(4));
console.log('Hanning window center value:', hanningWin[windowSize/2].toFixed(4));
console.log('Blackman window center value:', blackmanWin[windowSize/2].toFixed(4));

// 4. FFT and IFFT Round-trip
console.log('\n--- FFT/IFFT Round-trip ---');
const fftSize = 256;
const testSignal = new Float64Array(fftSize);

// Create test signal: sum of two sine waves
for (let i = 0; i < fftSize; i++) {
  testSignal[i] = Math.sin(2 * Math.PI * 5 * i / fftSize) +
                  0.5 * Math.sin(2 * Math.PI * 10 * i / fftSize);
}

// Forward FFT
const spectrum = accelerate.fft(testSignal);
console.log('FFT output size:', spectrum.real.length, 'bins');

// Find dominant frequencies
const magnitudes = new Float64Array(spectrum.real.length);
for (let i = 0; i < magnitudes.length; i++) {
  magnitudes[i] = Math.sqrt(spectrum.real[i]**2 + spectrum.imag[i]**2);
}

const peak1 = magnitudes.indexOf(accelerate.max(magnitudes));
console.log('Dominant frequency bin:', peak1);

// Inverse FFT
const reconstructed = accelerate.ifft(spectrum.real, spectrum.imag);

// Check reconstruction error
let maxError = 0;
for (let i = 0; i < fftSize; i++) {
  const error = Math.abs(testSignal[i] - reconstructed[i]);
  if (error > maxError) maxError = error;
}

console.log('Max reconstruction error:', maxError.toFixed(10));
console.log('Reconstruction successful:', maxError < 1e-10 ? 'YES' : 'NO');

// 5. Windowed FFT (for spectral analysis)
console.log('\n--- Windowed FFT ---');
const windowedSignal = new Float64Array(fftSize);
const window = accelerate.hanning(fftSize);

accelerate.vmul(testSignal, window, windowedSignal);
const windowedSpectrum = accelerate.fft(windowedSignal);

console.log('Windowed FFT reduces spectral leakage');
console.log('Original signal energy:', accelerate.sumOfSquares(testSignal).toFixed(2));
console.log('Windowed signal energy:', accelerate.sumOfSquares(windowedSignal).toFixed(2));
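
When the dominant bin from the FFT example above needs to be reported in hertz, the standard bin-to-frequency conversion applies; binToHz is an editor-invented helper, and sampleRate is a hypothetical parameter for illustration since the example works purely in bins.

// Editor's sketch: convert an FFT bin index to a frequency in Hz.
// sampleRate is hypothetical; fftSize matches the example above (256).
function binToHz(bin, fftSize, sampleRate) {
  return bin * sampleRate / fftSize; // bin spacing = sampleRate / fftSize
}

// With sampleRate = 256 Hz and fftSize = 256, bin 5 corresponds to 5 Hz,
// matching the 5-cycles-per-window sine in the test signal.
console.log(binToHz(5, 256, 256)); // 5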

package/examples/statistical-operations.js
@@ -0,0 +1,44 @@
/**
 * Statistical Operations Examples
 * Demonstrates statistical functions using Apple Accelerate
 */

const accelerate = require('..');

console.log('=== Statistical Operations ===\n');

// Create sample data
const data = new Float64Array(1000);
for (let i = 0; i < data.length; i++) {
  data[i] = Math.random() * 100;
}

console.log('Sample size:', data.length);

// Basic statistics
console.log('\n--- Basic Statistics ---');
console.log('Mean:', accelerate.mean(data).toFixed(2));
console.log('Variance:', accelerate.variance(data).toFixed(2));
console.log('Standard Deviation:', accelerate.stddev(data).toFixed(2));
console.log('RMS:', accelerate.rms(data).toFixed(2));

// Min/Max
const { min, max } = accelerate.minmax(data);
console.log('\n--- Range ---');
console.log('Min:', min.toFixed(2));
console.log('Max:', max.toFixed(2));
console.log('Range:', (max - min).toFixed(2));

// Magnitude statistics
console.log('\n--- Magnitude Statistics ---');
console.log('Sum:', accelerate.sum(data).toFixed(2));
console.log('Sum of Squares:', accelerate.sumOfSquares(data).toFixed(2));
console.log('Mean Magnitude:', accelerate.meanMagnitude(data).toFixed(2));
console.log('Mean Square:', accelerate.meanSquare(data).toFixed(2));

// Normalized data
const normalized = new Float64Array(data.length);
accelerate.normalize(data, normalized);
console.log('\n--- Normalized Data ---');
console.log('Normalized mean:', accelerate.mean(normalized).toFixed(6));
console.log('Normalized magnitude:', Math.sqrt(accelerate.sumOfSquares(normalized)).toFixed(6));
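
As a final cross-check, the basic statistics in the example above can be recomputed in plain JavaScript; statsRef is an editor-invented helper and assumes population (divide-by-N) variance, which may differ from the native implementation's convention, so treat small discrepancies as a definitional difference rather than a bug.

// Editor's sketch: plain-JS mean / variance / stddev / rms for comparison.
function statsRef(values) {
  const n = values.length;
  const mean = values.reduce((s, v) => s + v, 0) / n;
  const variance = values.reduce((s, v) => s + (v - mean) ** 2, 0) / n; // population variance (assumed)
  const sumSq = values.reduce((s, v) => s + v * v, 0);
  return { mean, variance, stddev: Math.sqrt(variance), rms: Math.sqrt(sumSq / n) };
}

console.log(statsRef([2, 4, 4, 4, 5, 5, 7, 9]));
// mean: 5, variance: 4, stddev: 2, rms: ≈5.39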