@digitaldefiance/node-accelerate 1.0.6 → 2.0.0
This diff shows the published contents of two public package versions as they appear in their registry. It is provided for informational purposes only.
- package/README.md +1277 -166
- package/accelerate.cc +1346 -0
- package/examples/advanced-functions.js +0 -0
- package/examples/data-processing.js +123 -0
- package/examples/machine-learning.js +158 -0
- package/examples/mathematical-functions.js +101 -0
- package/examples/matrix-multiply.js +50 -0
- package/examples/ml-pipeline.js +317 -0
- package/examples/signal-processing-advanced.js +98 -0
- package/examples/signal-processing.js +70 -0
- package/examples/statistical-operations.js +44 -0
- package/examples/trigonometric-functions.js +52 -0
- package/examples/vector-operations.js +73 -0
- package/index.d.ts +720 -0
- package/index.js +636 -0
- package/package.json +13 -3
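The example scripts added below all follow the same pattern: preallocate Float64Array buffers, then hand them to the native Accelerate bindings. As a quick orientation before the per-file hunks, here is a minimal sketch of that call style; the install name comes from the package header above, and the vadd/dot/fft signatures are simply the ones exercised by the examples in this diff, so treat it as illustrative rather than authoritative:

    // Minimal sketch, assuming @digitaldefiance/node-accelerate 2.0.0 is installed (macOS, Apple Accelerate).
    const accelerate = require('@digitaldefiance/node-accelerate');

    const a = new Float64Array([1, 2, 3, 4, 5, 6, 7, 8]);
    const b = new Float64Array([8, 7, 6, 5, 4, 3, 2, 1]);
    const out = new Float64Array(a.length);

    accelerate.vadd(a, b, out);                 // element-wise add into a preallocated output
    console.log('dot:', accelerate.dot(a, b));  // scalar reduction
    const spectrum = accelerate.fft(a);         // returns { real, imag }; input length a power of 2, per the examples
    console.log('bins:', spectrum.real.length);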
package/examples/ml-pipeline.js
@@ -0,0 +1,317 @@
+/**
+ * Complete Machine Learning Pipeline Example
+ * Demonstrates data preprocessing, neural network inference, and evaluation
+ */
+
+const accelerate = require('..');
+
+console.log('=== Machine Learning Pipeline ===\n');
+
+// ============================================================================
+// 1. DATA PREPROCESSING
+// ============================================================================
+
+console.log('--- Step 1: Data Preprocessing ---');
+
+// Generate synthetic dataset
+const numSamples = 1000;
+const numFeatures = 10;
+const rawData = new Float64Array(numSamples * numFeatures);
+
+for (let i = 0; i < rawData.length; i++) {
+  rawData[i] = Math.random() * 200 - 100; // Range: -100 to 100
+}
+
+console.log('Raw data shape:', [numSamples, numFeatures]);
+console.log('Raw data range:', [
+  accelerate.min(rawData).toFixed(2),
+  accelerate.max(rawData).toFixed(2)
+]);
+
+// Step 1a: Clip outliers
+const clipped = new Float64Array(rawData.length);
+accelerate.vclip(rawData, clipped, -50, 50);
+
+// Step 1b: Standardization (z-score normalization)
+function standardize(data, output) {
+  const mean = accelerate.mean(data);
+  const std = accelerate.stddev(data);
+
+  for (let i = 0; i < data.length; i++) {
+    output[i] = (data[i] - mean) / std;
+  }
+
+  return output;
+}
+
+const normalized = new Float64Array(clipped.length);
+standardize(clipped, normalized);
+
+console.log('After preprocessing:');
+console.log(' Mean:', accelerate.mean(normalized).toFixed(6));
+console.log(' Std Dev:', accelerate.stddev(normalized).toFixed(6));
+console.log(' Range:', [
+  accelerate.min(normalized).toFixed(2),
+  accelerate.max(normalized).toFixed(2)
+]);
+
+// ============================================================================
+// 2. NEURAL NETWORK INFERENCE
+// ============================================================================
+
+console.log('\n--- Step 2: Neural Network Inference ---');
+
+// Network architecture: 10 -> 64 -> 32 -> 3 (classification)
+const layer1Weights = new Float64Array(10 * 64);
+const layer1Bias = new Float64Array(64);
+const layer2Weights = new Float64Array(64 * 32);
+const layer2Bias = new Float64Array(32);
+const layer3Weights = new Float64Array(32 * 3);
+const layer3Bias = new Float64Array(3);
+
+// Initialize with random weights (Xavier initialization)
+function initializeWeights(weights, fanIn, fanOut) {
+  const limit = Math.sqrt(6.0 / (fanIn + fanOut));
+  for (let i = 0; i < weights.length; i++) {
+    weights[i] = (Math.random() * 2 - 1) * limit;
+  }
+}
+
+initializeWeights(layer1Weights, 10, 64);
+initializeWeights(layer2Weights, 64, 32);
+initializeWeights(layer3Weights, 32, 3);
+
+// ReLU activation
+function relu(input, output) {
+  accelerate.vclip(input, output, 0, Infinity);
+  return output;
+}
+
+// Softmax activation
+function softmax(logits, output) {
+  // Subtract max for numerical stability
+  const maxVal = accelerate.max(logits);
+  const shifted = new Float64Array(logits.length);
+
+  for (let i = 0; i < logits.length; i++) {
+    shifted[i] = logits[i] - maxVal;
+  }
+
+  // Compute exp
+  accelerate.vexp(shifted, output);
+
+  // Normalize
+  const sum = accelerate.sum(output);
+  accelerate.vscale(output, 1.0 / sum, output);
+
+  return output;
+}
+
+// Forward pass for one sample
+function forward(input) {
+  // Layer 1: input (10) -> hidden1 (64)
+  const hidden1 = new Float64Array(64);
+  accelerate.matvec(layer1Weights, input, hidden1, 64, 10);
+  accelerate.vadd(hidden1, layer1Bias, hidden1);
+  relu(hidden1, hidden1);
+
+  // Layer 2: hidden1 (64) -> hidden2 (32)
+  const hidden2 = new Float64Array(32);
+  accelerate.matvec(layer2Weights, hidden1, hidden2, 32, 64);
+  accelerate.vadd(hidden2, layer2Bias, hidden2);
+  relu(hidden2, hidden2);
+
+  // Layer 3: hidden2 (32) -> output (3)
+  const logits = new Float64Array(3);
+  accelerate.matvec(layer3Weights, hidden2, logits, 3, 32);
+  accelerate.vadd(logits, layer3Bias, logits);
+
+  // Softmax
+  const probs = new Float64Array(3);
+  softmax(logits, probs);
+
+  return probs;
+}
+
+// Run inference on first sample
+const sampleInput = normalized.subarray(0, 10);
+const predictions = forward(sampleInput);
+
+console.log('Network architecture: 10 -> 64 -> 32 -> 3');
+console.log('Sample prediction:', Array.from(predictions).map(x => x.toFixed(4)));
+console.log('Predicted class:', predictions.indexOf(accelerate.max(predictions)));
+
+// Batch inference
+console.log('\nRunning batch inference...');
+console.time('Batch inference (1000 samples)');
+
+const allPredictions = [];
+for (let i = 0; i < numSamples; i++) {
+  const input = normalized.subarray(i * numFeatures, (i + 1) * numFeatures);
+  const pred = forward(input);
+  allPredictions.push(Array.from(pred));
+}
+
+console.timeEnd('Batch inference (1000 samples)');
+
+// ============================================================================
+// 3. FEATURE ENGINEERING
+// ============================================================================
+
+console.log('\n--- Step 3: Feature Engineering ---');
+
+// Compute polynomial features (x^2)
+const squaredFeatures = new Float64Array(normalized.length);
+accelerate.vsquare(normalized, squaredFeatures);
+
+// Compute interaction features (for first two features)
+const feature1 = normalized.subarray(0, numSamples);
+const feature2 = normalized.subarray(numSamples, numSamples * 2);
+const interaction = new Float64Array(numSamples);
+accelerate.vmul(feature1, feature2, interaction);
+
+console.log('Original features:', numFeatures);
+console.log('Added squared features:', numFeatures);
+console.log('Added interaction features: 1');
+console.log('Total features:', numFeatures * 2 + 1);
+
+// ============================================================================
+// 4. DISTANCE-BASED METHODS
+// ============================================================================
+
+console.log('\n--- Step 4: Distance Computations ---');
+
+// K-nearest neighbors: find distance to first sample
+const query = normalized.subarray(0, numFeatures);
+const distances = [];
+
+for (let i = 1; i < Math.min(100, numSamples); i++) {
+  const sample = normalized.subarray(i * numFeatures, (i + 1) * numFeatures);
+  const dist = accelerate.euclidean(query, sample);
+  distances.push({ index: i, distance: dist });
+}
+
+// Sort by distance
+distances.sort((a, b) => a.distance - b.distance);
+
+console.log('K-Nearest Neighbors (k=5):');
+for (let i = 0; i < 5; i++) {
+  console.log(` Neighbor ${i + 1}: sample ${distances[i].index}, distance ${distances[i].distance.toFixed(4)}`);
+}
+
+// ============================================================================
+// 5. DIMENSIONALITY REDUCTION (PCA-like projection)
+// ============================================================================
+
+console.log('\n--- Step 5: Dimensionality Reduction ---');
+
+// Simple random projection (approximates PCA)
+const projectionMatrix = new Float64Array(numFeatures * 2); // Project to 2D
+initializeWeights(projectionMatrix, numFeatures, 2);
+
+// Normalize projection matrix columns
+for (let col = 0; col < 2; col++) {
+  const column = new Float64Array(numFeatures);
+  for (let row = 0; row < numFeatures; row++) {
+    column[row] = projectionMatrix[row * 2 + col];
+  }
+
+  const normalized = new Float64Array(numFeatures);
+  accelerate.normalize(column, normalized);
+
+  for (let row = 0; row < numFeatures; row++) {
+    projectionMatrix[row * 2 + col] = normalized[row];
+  }
+}
+
+// Project first sample
+const projected = new Float64Array(2);
+accelerate.matvec(projectionMatrix, sampleInput, projected, 2, numFeatures);
+
+console.log('Original dimensions:', numFeatures);
+console.log('Reduced dimensions: 2');
+console.log('Sample projection:', Array.from(projected).map(x => x.toFixed(4)));
+
+// ============================================================================
+// 6. SIGNAL PROCESSING FOR TIME SERIES
+// ============================================================================
+
+console.log('\n--- Step 6: Time Series Analysis ---');
+
+// Generate synthetic time series
+const timeSeriesLength = 1024;
+const timeSeries = new Float64Array(timeSeriesLength);
+
+for (let i = 0; i < timeSeriesLength; i++) {
+  // Mix of frequencies
+  timeSeries[i] =
+    Math.sin(2 * Math.PI * 5 * i / timeSeriesLength) +
+    0.5 * Math.sin(2 * Math.PI * 10 * i / timeSeriesLength) +
+    0.3 * Math.sin(2 * Math.PI * 20 * i / timeSeriesLength);
+}
+
+// Apply Hanning window
+const window = accelerate.hanning(timeSeriesLength);
+const windowed = new Float64Array(timeSeriesLength);
+accelerate.vmul(timeSeries, window, windowed);
+
+// Compute FFT
+const spectrum = accelerate.fft(windowed);
+
+// Find dominant frequencies
+const magnitudes = new Float64Array(spectrum.real.length);
+for (let i = 0; i < magnitudes.length; i++) {
+  magnitudes[i] = Math.sqrt(spectrum.real[i]**2 + spectrum.imag[i]**2);
+}
+
+// Find peaks
+const peaks = [];
+for (let i = 1; i < magnitudes.length - 1; i++) {
+  if (magnitudes[i] > magnitudes[i-1] && magnitudes[i] > magnitudes[i+1] && magnitudes[i] > 10) {
+    peaks.push({ bin: i, magnitude: magnitudes[i] });
+  }
+}
+
+peaks.sort((a, b) => b.magnitude - a.magnitude);
+
+console.log('Time series length:', timeSeriesLength);
+console.log('Dominant frequency bins:', peaks.slice(0, 3).map(p => p.bin));
+
+// ============================================================================
+// 7. PERFORMANCE SUMMARY
+// ============================================================================
+
+console.log('\n--- Performance Summary ---');
+
+// Benchmark key operations
+const benchSize = 10000;
+const benchVec1 = new Float64Array(benchSize);
+const benchVec2 = new Float64Array(benchSize);
+const benchResult = new Float64Array(benchSize);
+
+for (let i = 0; i < benchSize; i++) {
+  benchVec1[i] = Math.random();
+  benchVec2[i] = Math.random();
+}
+
+console.time('Vector addition (10k elements)');
+for (let i = 0; i < 100; i++) {
+  accelerate.vadd(benchVec1, benchVec2, benchResult);
+}
+console.timeEnd('Vector addition (10k elements)');
+
+console.time('Dot product (10k elements)');
+for (let i = 0; i < 100; i++) {
+  accelerate.dot(benchVec1, benchVec2);
+}
+console.timeEnd('Dot product (10k elements)');
+
+console.time('Statistical operations (10k elements)');
+for (let i = 0; i < 100; i++) {
+  accelerate.mean(benchVec1);
+  accelerate.stddev(benchVec1);
+  accelerate.minmax(benchVec1);
+}
+console.timeEnd('Statistical operations (10k elements)');
+
+console.log('\n✓ ML Pipeline Complete!');
package/examples/signal-processing-advanced.js
@@ -0,0 +1,98 @@
+/**
+ * Advanced Signal Processing Examples
+ * Demonstrates convolution, correlation, windowing, and FFT/IFFT
+ */
+
+const accelerate = require('..');
+
+console.log('=== Advanced Signal Processing ===\n');
+
+// 1. Convolution Example
+console.log('--- Convolution ---');
+const signal = new Float64Array([1, 2, 3, 4, 5, 6, 7, 8]);
+const kernel = new Float64Array([0.25, 0.5, 0.25]); // Moving average
+const convResult = new Float64Array(signal.length - kernel.length + 1);
+
+accelerate.conv(signal, kernel, convResult);
+console.log('Signal:', Array.from(signal));
+console.log('Kernel (moving avg):', Array.from(kernel));
+console.log('Convolution result:', Array.from(convResult).map(x => x.toFixed(2)));
+
+// 2. Cross-correlation Example
+console.log('\n--- Cross-Correlation ---');
+const sig1 = new Float64Array(100);
+const sig2 = new Float64Array(100);
+
+// Create two similar signals with a delay
+for (let i = 0; i < 100; i++) {
+  sig1[i] = Math.sin(2 * Math.PI * i / 20);
+  sig2[i] = i >= 10 ? Math.sin(2 * Math.PI * (i - 10) / 20) : 0;
+}
+
+const xcorrResult = new Float64Array(sig1.length + sig2.length - 1);
+accelerate.xcorr(sig1, sig2, xcorrResult);
+
+// Find peak (indicates delay)
+const peakIdx = xcorrResult.indexOf(accelerate.max(xcorrResult));
+console.log('Detected delay:', Math.abs(peakIdx - sig1.length + 1), 'samples');
+
+// 3. Window Functions
+console.log('\n--- Window Functions ---');
+const windowSize = 64;
+
+const hammingWin = accelerate.hamming(windowSize);
+const hanningWin = accelerate.hanning(windowSize);
+const blackmanWin = accelerate.blackman(windowSize);
+
+console.log('Hamming window center value:', hammingWin[windowSize/2].toFixed(4));
+console.log('Hanning window center value:', hanningWin[windowSize/2].toFixed(4));
+console.log('Blackman window center value:', blackmanWin[windowSize/2].toFixed(4));
+
+// 4. FFT and IFFT Round-trip
+console.log('\n--- FFT/IFFT Round-trip ---');
+const fftSize = 256;
+const testSignal = new Float64Array(fftSize);
+
+// Create test signal: sum of two sine waves
+for (let i = 0; i < fftSize; i++) {
+  testSignal[i] = Math.sin(2 * Math.PI * 5 * i / fftSize) +
+    0.5 * Math.sin(2 * Math.PI * 10 * i / fftSize);
+}
+
+// Forward FFT
+const spectrum = accelerate.fft(testSignal);
+console.log('FFT output size:', spectrum.real.length, 'bins');
+
+// Find dominant frequencies
+const magnitudes = new Float64Array(spectrum.real.length);
+for (let i = 0; i < magnitudes.length; i++) {
+  magnitudes[i] = Math.sqrt(spectrum.real[i]**2 + spectrum.imag[i]**2);
+}
+
+const peak1 = magnitudes.indexOf(accelerate.max(magnitudes));
+console.log('Dominant frequency bin:', peak1);
+
+// Inverse FFT
+const reconstructed = accelerate.ifft(spectrum.real, spectrum.imag);
+
+// Check reconstruction error
+let maxError = 0;
+for (let i = 0; i < fftSize; i++) {
+  const error = Math.abs(testSignal[i] - reconstructed[i]);
+  if (error > maxError) maxError = error;
+}
+
+console.log('Max reconstruction error:', maxError.toFixed(10));
+console.log('Reconstruction successful:', maxError < 1e-10 ? 'YES' : 'NO');
+
+// 5. Windowed FFT (for spectral analysis)
+console.log('\n--- Windowed FFT ---');
+const windowedSignal = new Float64Array(fftSize);
+const window = accelerate.hanning(fftSize);
+
+accelerate.vmul(testSignal, window, windowedSignal);
+const windowedSpectrum = accelerate.fft(windowedSignal);
+
+console.log('Windowed FFT reduces spectral leakage');
+console.log('Original signal energy:', accelerate.sumOfSquares(testSignal).toFixed(2));
+console.log('Windowed signal energy:', accelerate.sumOfSquares(windowedSignal).toFixed(2));
package/examples/signal-processing.js
@@ -0,0 +1,70 @@
+#!/usr/bin/env node
+/**
+ * Example: Signal Processing with FFT
+ * Demonstrates frequency analysis of audio signals
+ */
+
+const accelerate = require('../index');
+
+console.log('Signal Processing Example');
+console.log('='.repeat(50));
+console.log('');
+
+// Create a composite signal: 440 Hz (A4) + 880 Hz (A5)
+const sampleRate = 44100; // CD quality
+const duration = 1; // 1 second
+const fftSize = 16384; // Must be power of 2
+
+console.log(`Sample rate: ${sampleRate} Hz`);
+console.log(`FFT size: ${fftSize}`);
+console.log(`Frequency resolution: ${(sampleRate / fftSize).toFixed(2)} Hz`);
+console.log('');
+
+// Generate signal
+console.log('Generating signal (440 Hz + 880 Hz)...');
+const signal = new Float64Array(fftSize);
+for (let i = 0; i < fftSize; i++) {
+  const t = i / sampleRate;
+  signal[i] =
+    Math.sin(2 * Math.PI * 440 * t) + // A4
+    Math.sin(2 * Math.PI * 880 * t); // A5
+}
+
+// Perform FFT
+console.log('Performing FFT...');
+const start = process.hrtime.bigint();
+const spectrum = accelerate.fft(signal);
+const end = process.hrtime.bigint();
+
+const timeMs = Number(end - start) / 1e6;
+console.log(`✓ Completed in ${timeMs.toFixed(3)}ms`);
+console.log('');
+
+// Calculate magnitudes
+const magnitudes = new Float64Array(spectrum.real.length);
+for (let i = 0; i < magnitudes.length; i++) {
+  magnitudes[i] = Math.sqrt(
+    spectrum.real[i] ** 2 + spectrum.imag[i] ** 2
+  );
+}
+
+// Find peaks
+console.log('Finding frequency peaks...');
+const peaks = [];
+const threshold = Math.max(...magnitudes) * 0.5;
+
+for (let i = 1; i < magnitudes.length - 1; i++) {
+  if (magnitudes[i] > threshold &&
+      magnitudes[i] > magnitudes[i - 1] &&
+      magnitudes[i] > magnitudes[i + 1]) {
+    const frequency = i * sampleRate / fftSize;
+    peaks.push({ frequency, magnitude: magnitudes[i] });
+  }
+}
+
+peaks.sort((a, b) => b.magnitude - a.magnitude);
+
+console.log('Top frequency components:');
+peaks.slice(0, 5).forEach((peak, i) => {
+  console.log(` ${i + 1}. ${peak.frequency.toFixed(1)} Hz (magnitude: ${peak.magnitude.toFixed(2)})`);
+});
package/examples/statistical-operations.js
@@ -0,0 +1,44 @@
+/**
+ * Statistical Operations Examples
+ * Demonstrates statistical functions using Apple Accelerate
+ */
+
+const accelerate = require('..');
+
+console.log('=== Statistical Operations ===\n');
+
+// Create sample data
+const data = new Float64Array(1000);
+for (let i = 0; i < data.length; i++) {
+  data[i] = Math.random() * 100;
+}
+
+console.log('Sample size:', data.length);
+
+// Basic statistics
+console.log('\n--- Basic Statistics ---');
+console.log('Mean:', accelerate.mean(data).toFixed(2));
+console.log('Variance:', accelerate.variance(data).toFixed(2));
+console.log('Standard Deviation:', accelerate.stddev(data).toFixed(2));
+console.log('RMS:', accelerate.rms(data).toFixed(2));
+
+// Min/Max
+const { min, max } = accelerate.minmax(data);
+console.log('\n--- Range ---');
+console.log('Min:', min.toFixed(2));
+console.log('Max:', max.toFixed(2));
+console.log('Range:', (max - min).toFixed(2));
+
+// Magnitude statistics
+console.log('\n--- Magnitude Statistics ---');
+console.log('Sum:', accelerate.sum(data).toFixed(2));
+console.log('Sum of Squares:', accelerate.sumOfSquares(data).toFixed(2));
+console.log('Mean Magnitude:', accelerate.meanMagnitude(data).toFixed(2));
+console.log('Mean Square:', accelerate.meanSquare(data).toFixed(2));
+
+// Normalized data
+const normalized = new Float64Array(data.length);
+accelerate.normalize(data, normalized);
+console.log('\n--- Normalized Data ---');
+console.log('Normalized mean:', accelerate.mean(normalized).toFixed(6));
+console.log('Normalized magnitude:', Math.sqrt(accelerate.sumOfSquares(normalized)).toFixed(6));
package/examples/trigonometric-functions.js
@@ -0,0 +1,52 @@
+/**
+ * Trigonometric Functions Examples
+ * Demonstrates vectorized trig operations using Apple Accelerate
+ */
+
+const accelerate = require('..');
+
+console.log('=== Trigonometric Functions ===\n');
+
+// Create angle array (0 to 2π)
+const n = 1000;
+const angles = new Float64Array(n);
+for (let i = 0; i < n; i++) {
+  angles[i] = (i / n) * 2 * Math.PI;
+}
+
+// Compute sin, cos, tan
+const sinValues = new Float64Array(n);
+const cosValues = new Float64Array(n);
+const tanValues = new Float64Array(n);
+
+console.time('Vectorized trig operations');
+accelerate.vsin(angles, sinValues);
+accelerate.vcos(angles, cosValues);
+accelerate.vtan(angles, tanValues);
+console.timeEnd('Vectorized trig operations');
+
+// Verify with known values
+console.log('\n--- Verification at key angles ---');
+const testIndices = [0, n/4, n/2, 3*n/4];
+const testAngles = ['0', 'π/2', 'π', '3π/2'];
+
+for (let i = 0; i < testIndices.length; i++) {
+  const idx = Math.floor(testIndices[i]);
+  console.log(`\nAngle: ${testAngles[i]}`);
+  console.log(` sin: ${sinValues[idx].toFixed(4)}`);
+  console.log(` cos: ${cosValues[idx].toFixed(4)}`);
+}
+
+// Compute sin²(x) + cos²(x) = 1 (identity check)
+const sinSquared = new Float64Array(n);
+const cosSquared = new Float64Array(n);
+const identity = new Float64Array(n);
+
+accelerate.vsquare(sinValues, sinSquared);
+accelerate.vsquare(cosValues, cosSquared);
+accelerate.vadd(sinSquared, cosSquared, identity);
+
+console.log('\n--- Pythagorean Identity Check ---');
+console.log('sin²(x) + cos²(x) should equal 1.0');
+console.log('Mean value:', accelerate.mean(identity).toFixed(10));
+console.log('Max deviation:', Math.abs(1.0 - accelerate.max(identity)).toFixed(10));
package/examples/vector-operations.js
@@ -0,0 +1,73 @@
+#!/usr/bin/env node
+/**
+ * Example: Vector Operations
+ * Demonstrates hardware-accelerated vector math
+ */
+
+const accelerate = require('../index');
+
+console.log('Vector Operations Example');
+console.log('='.repeat(50));
+console.log('');
+
+// Create large vectors
+const size = 1000000;
+console.log(`Vector size: ${size.toLocaleString()} elements`);
+console.log('');
+
+const a = new Float64Array(size);
+const b = new Float64Array(size);
+const result = new Float64Array(size);
+
+// Fill with random data
+for (let i = 0; i < size; i++) {
+  a[i] = Math.random() * 100;
+  b[i] = Math.random() * 100;
+}
+
+// Dot product
+console.log('Computing dot product...');
+let start = process.hrtime.bigint();
+const dotProduct = accelerate.dot(a, b);
+let end = process.hrtime.bigint();
+console.log(` Result: ${dotProduct.toFixed(2)}`);
+console.log(` Time: ${(Number(end - start) / 1e6).toFixed(3)}ms`);
+console.log('');
+
+// Sum
+console.log('Computing sum...');
+start = process.hrtime.bigint();
+const sum = accelerate.sum(a);
+end = process.hrtime.bigint();
+console.log(` Result: ${sum.toFixed(2)}`);
+console.log(` Time: ${(Number(end - start) / 1e6).toFixed(3)}ms`);
+console.log('');
+
+// Mean
+console.log('Computing mean...');
+start = process.hrtime.bigint();
+const mean = accelerate.mean(a);
+end = process.hrtime.bigint();
+console.log(` Result: ${mean.toFixed(2)}`);
+console.log(` Time: ${(Number(end - start) / 1e6).toFixed(3)}ms`);
+console.log('');
+
+// Vector addition
+console.log('Computing vector addition...');
+start = process.hrtime.bigint();
+accelerate.vadd(a, b, result);
+end = process.hrtime.bigint();
+console.log(` Sample: ${a[0].toFixed(2)} + ${b[0].toFixed(2)} = ${result[0].toFixed(2)}`);
+console.log(` Time: ${(Number(end - start) / 1e6).toFixed(3)}ms`);
+console.log('');
+
+// Vector multiplication
+console.log('Computing vector multiplication...');
+start = process.hrtime.bigint();
+accelerate.vmul(a, b, result);
+end = process.hrtime.bigint();
+console.log(` Sample: ${a[0].toFixed(2)} × ${b[0].toFixed(2)} = ${result[0].toFixed(2)}`);
+console.log(` Time: ${(Number(end - start) / 1e6).toFixed(3)}ms`);
+console.log('');
+
+console.log('✓ All operations completed successfully!');