@digitaldefiance/node-accelerate 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.ts ADDED
@@ -0,0 +1,320 @@
+ /**
+ * TypeScript definitions for node-accelerate
+ * Apple Accelerate framework bindings for Node.js
+ */
+
+ /**
+ * Perform matrix multiplication: C = A × B
+ * Uses Apple's BLAS (Basic Linear Algebra Subprograms) for hardware-accelerated computation
+ *
+ * @param A - First matrix (M × K) as Float64Array in row-major order
+ * @param B - Second matrix (K × N) as Float64Array in row-major order
+ * @param C - Output matrix (M × N) as Float64Array in row-major order
+ * @param M - Number of rows in A and C
+ * @param K - Number of columns in A and rows in B
+ * @param N - Number of columns in B and C
+ * @returns The output matrix C
+ *
+ * @example
+ * const M = 100, K = 100, N = 100;
+ * const A = new Float64Array(M * K);
+ * const B = new Float64Array(K * N);
+ * const C = new Float64Array(M * N);
+ *
+ * // Fill A and B with data
+ * for (let i = 0; i < A.length; i++) A[i] = Math.random();
+ * for (let i = 0; i < B.length; i++) B[i] = Math.random();
+ *
+ * // C = A × B (hardware-accelerated)
+ * accelerate.matmul(A, B, C, M, K, N);
+ */
+ export function matmul(
+   A: Float64Array,
+   B: Float64Array,
+   C: Float64Array,
+   M: number,
+   K: number,
+   N: number
+ ): Float64Array;
+
+ /**
+ * Compute dot product of two vectors: result = sum(a[i] * b[i])
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - First vector as Float64Array
+ * @param b - Second vector as Float64Array (must be same length as a)
+ * @returns The dot product as a number
+ *
+ * @example
+ * const a = new Float64Array([1, 2, 3, 4]);
+ * const b = new Float64Array([5, 6, 7, 8]);
+ * const result = accelerate.dot(a, b); // 70
+ */
+ export function dot(a: Float64Array, b: Float64Array): number;
+
+ /**
+ * Compute sum of all elements in a vector: result = sum(vec[i])
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param vec - Input vector as Float64Array
+ * @returns The sum of all elements
+ *
+ * @example
+ * const vec = new Float64Array([1, 2, 3, 4, 5]);
+ * const result = accelerate.sum(vec); // 15
+ */
+ export function sum(vec: Float64Array): number;
+
+ /**
+ * Compute mean (average) of all elements in a vector
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param vec - Input vector as Float64Array
+ * @returns The mean of all elements
+ *
+ * @example
+ * const vec = new Float64Array([1, 2, 3, 4, 5]);
+ * const result = accelerate.mean(vec); // 3
+ */
+ export function mean(vec: Float64Array): number;
+
+ /**
+ * Element-wise vector addition: out[i] = a[i] + b[i]
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - First vector as Float64Array
+ * @param b - Second vector as Float64Array (must be same length as a)
+ * @param out - Output vector as Float64Array (must be same length as a)
+ * @returns The output vector
+ *
+ * @example
+ * const a = new Float64Array([1, 2, 3]);
+ * const b = new Float64Array([4, 5, 6]);
+ * const out = new Float64Array(3);
+ * accelerate.vadd(a, b, out); // out = [5, 7, 9]
+ */
+ export function vadd(
+   a: Float64Array,
+   b: Float64Array,
+   out: Float64Array
+ ): Float64Array;
+
+ /**
+ * Element-wise vector multiplication: out[i] = a[i] * b[i]
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - First vector as Float64Array
+ * @param b - Second vector as Float64Array (must be same length as a)
+ * @param out - Output vector as Float64Array (must be same length as a)
+ * @returns The output vector
+ *
+ * @example
+ * const a = new Float64Array([2, 3, 4]);
+ * const b = new Float64Array([5, 6, 7]);
+ * const out = new Float64Array(3);
+ * accelerate.vmul(a, b, out); // out = [10, 18, 28]
+ */
+ export function vmul(
+   a: Float64Array,
+   b: Float64Array,
+   out: Float64Array
+ ): Float64Array;
+
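+ /**
+ * Element-wise vector subtraction: out[i] = a[i] - b[i]
+ * Hardware-accelerated via Apple's Accelerate framework
+ * (declared here because index.js exports vsub and the accelerate object below references it)
+ *
+ * @param a - First vector as Float64Array
+ * @param b - Second vector as Float64Array (must be same length as a)
+ * @param out - Output vector as Float64Array (must be same length as a)
+ * @returns The output vector
+ */
+ export function vsub(
+   a: Float64Array,
+   b: Float64Array,
+   out: Float64Array
+ ): Float64Array;
+
+ /**
+ * Element-wise vector division: out[i] = a[i] / b[i]
+ * Hardware-accelerated via Apple's Accelerate framework
+ * (declared here because index.js exports vdiv and the accelerate object below references it)
+ *
+ * @param a - Numerator vector as Float64Array
+ * @param b - Denominator vector as Float64Array (must be same length as a)
+ * @param out - Output vector as Float64Array (must be same length as a)
+ * @returns The output vector
+ */
+ export function vdiv(
+   a: Float64Array,
+   b: Float64Array,
+   out: Float64Array
+ ): Float64Array;
+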
+ /**
+ * Vector scaling: out[i] = vec[i] * scalar
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param vec - Input vector as Float64Array
+ * @param scalar - Scalar value to multiply by
+ * @param out - Output vector as Float64Array (must be same length as vec)
+ * @returns The output vector
+ *
+ * @example
+ * const vec = new Float64Array([1, 2, 3]);
+ * const out = new Float64Array(3);
+ * accelerate.vscale(vec, 2.0, out); // out = [2, 4, 6]
+ */
+ export function vscale(
+   vec: Float64Array,
+   scalar: number,
+   out: Float64Array
+ ): Float64Array;
+
+ /**
+ * Find maximum value in a vector
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param vec - Input vector as Float64Array
+ * @returns The maximum value
+ *
+ * @example
+ * const vec = new Float64Array([1, 5, 3, 2, 4]);
+ * const result = accelerate.max(vec); // 5
+ */
+ export function max(vec: Float64Array): number;
+
+ /**
+ * Find minimum value in a vector
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param vec - Input vector as Float64Array
+ * @returns The minimum value
+ *
+ * @example
+ * const vec = new Float64Array([1, 5, 3, 2, 4]);
+ * const result = accelerate.min(vec); // 1
+ */
+ export function min(vec: Float64Array): number;
+
+ /**
+ * Fast Fourier Transform (FFT) of a real signal
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param signal - Input signal as Float64Array (length must be a power of 2)
+ * @returns Object with the real and imaginary components of the frequency spectrum;
+ *          each array has signal.length / 2 elements
+ *
+ * @example
+ * const signal = new Float64Array(1024);
+ * for (let i = 0; i < signal.length; i++) {
+ *   signal[i] = Math.sin(2 * Math.PI * i / signal.length);
+ * }
+ * const spectrum = accelerate.fft(signal);
+ * console.log(spectrum.real, spectrum.imag);
+ */
+ export function fft(signal: Float64Array): {
+   real: Float64Array;
+   imag: Float64Array;
+ };
+
+ /**
+ * Matrix-vector multiplication: y = A × x
+ * Uses Apple's BLAS for hardware-accelerated computation
+ *
+ * @param A - Matrix (M × N) as Float64Array in row-major order
+ * @param x - Vector (N elements) as Float64Array
+ * @param y - Output vector (M elements) as Float64Array
+ * @param M - Number of rows in A
+ * @param N - Number of columns in A
+ * @returns The output vector y
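+ *
+ * @example
+ * // Illustrative values: a 2 × 3 matrix times a length-3 vector
+ * const A = new Float64Array([1, 2, 3, 4, 5, 6]); // 2 × 3, row-major
+ * const x = new Float64Array([1, 1, 1]);
+ * const y = new Float64Array(2);
+ * accelerate.matvec(A, x, y, 2, 3); // y = [6, 15]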
+ */
+ export function matvec(
+   A: Float64Array,
+   x: Float64Array,
+   y: Float64Array,
+   M: number,
+   N: number
+ ): Float64Array;
+
+ /**
+ * AXPY operation: y = alpha*x + y
+ * Uses Apple's BLAS for hardware-accelerated computation
+ *
+ * @param alpha - Scalar multiplier
+ * @param x - Input vector as Float64Array
+ * @param y - Input/output vector as Float64Array
+ * @returns The output vector y
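+ *
+ * @example
+ * // Illustrative values: y = 2*x + y, updated in place
+ * const x = new Float64Array([1, 2, 3]);
+ * const y = new Float64Array([10, 10, 10]);
+ * accelerate.axpy(2.0, x, y); // y = [12, 14, 16]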
+ */
+ export function axpy(
+   alpha: number,
+   x: Float64Array,
+   y: Float64Array
+ ): Float64Array;
+
+ /**
+ * Vector absolute value: b = |a|
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - Input vector as Float64Array
+ * @param b - Output vector as Float64Array
+ * @returns The output vector b
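+ *
+ * @example
+ * const a = new Float64Array([-1, 2, -3]);
+ * const b = new Float64Array(3);
+ * accelerate.vabs(a, b); // b = [1, 2, 3]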
+ */
+ export function vabs(a: Float64Array, b: Float64Array): Float64Array;
+
+ /**
+ * Vector square: b = a^2 (element-wise)
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - Input vector as Float64Array
+ * @param b - Output vector as Float64Array
+ * @returns The output vector b
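+ *
+ * @example
+ * const a = new Float64Array([1, 2, 3]);
+ * const b = new Float64Array(3);
+ * accelerate.vsquare(a, b); // b = [1, 4, 9]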
+ */
+ export function vsquare(a: Float64Array, b: Float64Array): Float64Array;
+
+ /**
+ * Vector square root: b = sqrt(a) (element-wise)
+ * Uses Apple's vForce for hardware-accelerated computation
+ *
+ * @param a - Input vector as Float64Array
+ * @param b - Output vector as Float64Array
+ * @returns The output vector b
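+ *
+ * @example
+ * const a = new Float64Array([1, 4, 9]);
+ * const b = new Float64Array(3);
+ * accelerate.vsqrt(a, b); // b = [1, 2, 3]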
+ */
+ export function vsqrt(a: Float64Array, b: Float64Array): Float64Array;
+
+ /**
+ * Normalize vector to unit length: b = a / ||a||
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - Input vector as Float64Array
+ * @param b - Output vector as Float64Array (unit vector)
+ * @returns The output vector b
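+ *
+ * @example
+ * const a = new Float64Array([3, 4]);
+ * const b = new Float64Array(2);
+ * accelerate.normalize(a, b); // b = [0.6, 0.8], so ||b|| = 1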
+ */
+ export function normalize(a: Float64Array, b: Float64Array): Float64Array;
+
+ /**
+ * Euclidean distance between two vectors: sqrt(sum((a - b)^2))
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - First vector as Float64Array
+ * @param b - Second vector as Float64Array
+ * @returns The Euclidean distance
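+ *
+ * @example
+ * const a = new Float64Array([0, 0]);
+ * const b = new Float64Array([3, 4]);
+ * const d = accelerate.euclidean(a, b); // 5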
+ */
+ export function euclidean(a: Float64Array, b: Float64Array): number;
+
+ /**
+ * Root Mean Square of vector: sqrt(sum(a^2) / n)
+ * Uses Apple's vDSP for hardware-accelerated computation
+ *
+ * @param a - Input vector as Float64Array
+ * @returns The RMS value
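+ *
+ * @example
+ * const a = new Float64Array([3, 4]);
+ * const result = accelerate.rms(a); // sqrt((9 + 16) / 2) ≈ 3.5355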
+ */
+ export function rms(a: Float64Array): number;
+
+ /**
+ * All exported functions
+ */
+ declare const accelerate: {
+   // Matrix operations
+   matmul: typeof matmul;
+   matvec: typeof matvec;
+
+   // BLAS operations
+   axpy: typeof axpy;
+
+   // Vector arithmetic
+   dot: typeof dot;
+   sum: typeof sum;
+   mean: typeof mean;
+   vadd: typeof vadd;
+   vsub: typeof vsub;
+   vmul: typeof vmul;
+   vdiv: typeof vdiv;
+   vscale: typeof vscale;
+
+   // Vector functions
+   vabs: typeof vabs;
+   vsquare: typeof vsquare;
+   vsqrt: typeof vsqrt;
+   normalize: typeof normalize;
+
+   // Reductions
+   max: typeof max;
+   min: typeof min;
+   rms: typeof rms;
+
+   // Distance metrics
+   euclidean: typeof euclidean;
+
+   // Signal processing
+   fft: typeof fft;
+ };
+
+ export default accelerate;
package/index.js ADDED
@@ -0,0 +1,392 @@
+ /**
+ * Apple Accelerate Framework for Node.js
+ *
+ * High-performance BLAS/vDSP operations optimized for M4 Max
+ *
+ * Usage:
+ *   const accelerate = require('@digitaldefiance/node-accelerate');
+ *
+ *   // Matrix multiplication (100x faster than JS for large matrices)
+ *   const A = new Float64Array(M * K);
+ *   const B = new Float64Array(K * N);
+ *   const C = new Float64Array(M * N);
+ *   accelerate.matmul(A, B, C, M, K, N);
+ *
+ *   // Vector operations
+ *   const result = accelerate.dot(vec1, vec2);
+ *   accelerate.vadd(a, b, c);     // c = a + b
+ *   accelerate.vmul(a, b, c);     // c = a * b (element-wise)
+ *   accelerate.vscale(a, 2.0, b); // b = a * 2.0
+ *
+ *   // Reductions
+ *   const sum = accelerate.sum(vec);
+ *   const mean = accelerate.mean(vec);
+ *   const max = accelerate.max(vec);
+ *   const min = accelerate.min(vec);
+ *
+ *   // FFT (input length must be a power of 2)
+ *   const spectrum = accelerate.fft(signal);
+ */
+
+ const path = require('path');
+ const os = require('os');
+
+ // Platform validation
+ function validatePlatform() {
+   const platform = process.platform;
+   const arch = process.arch;
+
+   if (platform !== 'darwin') {
+     const message = platform === 'linux' && arch === 'arm64'
+       ? `node-accelerate requires macOS, but detected Linux ARM64.\n` +
+         `This package uses Apple's Accelerate framework which is only available on macOS.\n` +
+         `Linux ARM64 systems (like Raspberry Pi, AWS Graviton) are not supported.\n` +
+         `\n` +
+         `For Linux ARM64, consider using:\n` +
+         `  - OpenBLAS: https://www.openblas.net/\n` +
+         `  - Eigen: https://eigen.tuxfamily.org/\n` +
+         `  - BLIS: https://github.com/flame/blis`
+       : `node-accelerate requires macOS (darwin), but detected ${platform}.\n` +
+         `This package uses Apple's Accelerate framework which is only available on macOS.\n` +
+         `Supported platforms: macOS only (Apple Silicon or Intel)`;
+
+     throw new Error(message);
+   }
+
+   if (arch !== 'arm64' && arch !== 'x64') {
+     throw new Error(
+       `node-accelerate requires ARM64 or x64 architecture, but detected ${arch}.\n` +
+       `Supported architectures:\n` +
+       `  - arm64 (Apple Silicon: M1/M2/M3/M4)\n` +
+       `  - x64 (Intel Macs)\n` +
+       `\n` +
+       `Your architecture (${arch}) is not supported.`
+     );
+   }
+
+   // Check for Accelerate framework (should always be present on macOS)
+   const fs = require('fs');
+   const acceleratePath = '/System/Library/Frameworks/Accelerate.framework';
+   if (!fs.existsSync(acceleratePath)) {
+     throw new Error(
+       `Apple Accelerate framework not found at ${acceleratePath}.\n` +
+       `This is unusual for macOS. Please ensure you're running on a standard macOS system.`
+     );
+   }
+ }
+
+ // Validate platform before loading native module
+ validatePlatform();
+
+ let accelerate;
+ try {
+   accelerate = require('./build/Release/accelerate.node');
+ } catch (e) {
+   // Try debug build
+   try {
+     accelerate = require('./build/Debug/accelerate.node');
+   } catch (e2) {
+     throw new Error(
+       'Failed to load node-accelerate native module.\n' +
+       'This usually means the module needs to be built.\n\n' +
+       'To fix this, run: npm rebuild @digitaldefiance/node-accelerate\n\n' +
+       'Requirements:\n' +
+       '  - macOS (Apple Silicon or Intel)\n' +
+       '  - Xcode Command Line Tools (run: xcode-select --install)\n' +
+       '  - Node.js >= 18.0.0\n\n' +
+       'Original error: ' + e.message
+     );
+   }
+ }
+
+ /**
+ * Matrix multiplication: C = A * B
+ * @param {Float64Array} A - Matrix A (M x K, row-major)
+ * @param {Float64Array} B - Matrix B (K x N, row-major)
+ * @param {Float64Array} C - Output matrix C (M x N, row-major)
+ * @param {number} M - Rows in A
+ * @param {number} K - Columns in A / Rows in B
+ * @param {number} N - Columns in B
+ * @returns {Float64Array} C
+ */
+ function matmul(A, B, C, M, K, N) {
+   if (!(A instanceof Float64Array) || !(B instanceof Float64Array) || !(C instanceof Float64Array)) {
+     throw new TypeError('Arguments must be Float64Arrays');
+   }
+   if (A.length !== M * K) throw new RangeError(`A must have ${M * K} elements`);
+   if (B.length !== K * N) throw new RangeError(`B must have ${K * N} elements`);
+   if (C.length !== M * N) throw new RangeError(`C must have ${M * N} elements`);
+
+   return accelerate.matmul(A, B, C, M, K, N);
+ }
+
+ /**
+ * Matrix multiplication (single precision): C = A * B
+ * @param {Float32Array} A - Matrix A (M x K, row-major)
+ * @param {Float32Array} B - Matrix B (K x N, row-major)
+ * @param {Float32Array} C - Output matrix C (M x N, row-major)
+ * @param {number} M - Rows in A
+ * @param {number} K - Columns in A / Rows in B
+ * @param {number} N - Columns in B
+ * @returns {Float32Array} C
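+ *
+ * @example
+ * // Illustrative 2 × 2 single-precision multiply
+ * const A = new Float32Array([1, 2, 3, 4]); // 2 × 2
+ * const B = new Float32Array([5, 6, 7, 8]); // 2 × 2
+ * const C = new Float32Array(4);
+ * matmulFloat(A, B, C, 2, 2, 2); // C = [19, 22, 43, 50]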
+ */
+ function matmulFloat(A, B, C, M, K, N) {
+   if (!(A instanceof Float32Array) || !(B instanceof Float32Array) || !(C instanceof Float32Array)) {
+     throw new TypeError('Arguments must be Float32Arrays');
+   }
+   return accelerate.matmulFloat(A, B, C, M, K, N);
+ }
+
+ /**
+ * Dot product of two vectors
+ * @param {Float64Array} a
+ * @param {Float64Array} b
+ * @returns {number}
+ */
+ function dot(a, b) {
+   if (!(a instanceof Float64Array) || !(b instanceof Float64Array)) {
+     throw new TypeError('Arguments must be Float64Arrays');
+   }
+   return accelerate.dot(a, b);
+ }
+
+ /**
+ * Vector addition: c = a + b
+ * @param {Float64Array} a
+ * @param {Float64Array} b
+ * @param {Float64Array} c - Output
+ * @returns {Float64Array} c
+ */
+ function vadd(a, b, c) {
+   return accelerate.vadd(a, b, c);
+ }
+
+ /**
+ * Vector subtraction: c = a - b
+ * @param {Float64Array} a
+ * @param {Float64Array} b
+ * @param {Float64Array} c - Output
+ * @returns {Float64Array} c
+ */
+ function vsub(a, b, c) {
+   return accelerate.vsub(a, b, c);
+ }
+
+ /**
+ * Element-wise vector multiplication: c = a * b
+ * @param {Float64Array} a
+ * @param {Float64Array} b
+ * @param {Float64Array} c - Output
+ * @returns {Float64Array} c
+ */
+ function vmul(a, b, c) {
+   return accelerate.vmul(a, b, c);
+ }
+
+ /**
+ * Element-wise vector division: c = a / b
+ * @param {Float64Array} a
+ * @param {Float64Array} b
+ * @param {Float64Array} c - Output
+ * @returns {Float64Array} c
+ */
+ function vdiv(a, b, c) {
+   return accelerate.vdiv(a, b, c);
+ }
+
+ /**
+ * Vector scaling: b = a * scalar
+ * @param {Float64Array} a
+ * @param {number} scalar
+ * @param {Float64Array} b - Output
+ * @returns {Float64Array} b
+ */
+ function vscale(a, scalar, b) {
+   return accelerate.vscale(a, scalar, b);
+ }
+
+ /**
+ * Sum of vector elements
+ * @param {Float64Array} a
+ * @returns {number}
+ */
+ function sum(a) {
+   return accelerate.sum(a);
+ }
+
+ /**
+ * Mean of vector elements
+ * @param {Float64Array} a
+ * @returns {number}
+ */
+ function mean(a) {
+   return accelerate.mean(a);
+ }
+
+ /**
+ * Maximum element in vector
+ * @param {Float64Array} a
+ * @returns {number}
+ */
+ function max(a) {
+   return accelerate.max(a);
+ }
+
+ /**
+ * Minimum element in vector
+ * @param {Float64Array} a
+ * @returns {number}
+ */
+ function min(a) {
+   return accelerate.min(a);
+ }
+
+ /**
+ * Fast Fourier Transform
+ * @param {Float64Array} input - Real input (length must be power of 2)
+ * @returns {{real: Float64Array, imag: Float64Array}} Complex output
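+ *
+ * @example
+ * // Illustrative: magnitude spectrum of a test signal
+ * const signal = new Float64Array(1024);
+ * for (let i = 0; i < signal.length; i++) signal[i] = Math.sin(2 * Math.PI * 8 * i / signal.length);
+ * const { real, imag } = fft(signal);
+ * const magnitude = real.map((re, i) => Math.hypot(re, imag[i])); // 512 bins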
+ */
+ function fft(input) {
+   if (!(input instanceof Float64Array)) {
+     throw new TypeError('Input must be Float64Array');
+   }
+   const len = input.length;
+   if ((len & (len - 1)) !== 0) {
+     throw new RangeError('Input length must be a power of 2');
+   }
+
+   const interleaved = accelerate.fft(input);
+   const half = len / 2;
+   const real = new Float64Array(half);
+   const imag = new Float64Array(half);
+
+   for (let i = 0; i < half; i++) {
+     real[i] = interleaved[i * 2];
+     imag[i] = interleaved[i * 2 + 1];
+   }
+
+   return { real, imag };
+ }
+
+ /**
+ * Matrix-vector multiplication: y = A * x
+ * @param {Float64Array} A - Matrix (M × N, row-major)
+ * @param {Float64Array} x - Vector (N elements)
+ * @param {Float64Array} y - Output vector (M elements)
+ * @param {number} M - Rows in A
+ * @param {number} N - Columns in A
+ * @returns {Float64Array} y
+ */
+ function matvec(A, x, y, M, N) {
+   return accelerate.matvec(A, x, y, M, N);
+ }
+
+ /**
+ * AXPY operation: y = alpha*x + y
+ * @param {number} alpha - Scalar multiplier
+ * @param {Float64Array} x - Input vector
+ * @param {Float64Array} y - Input/output vector
+ * @returns {Float64Array} y
+ */
+ function axpy(alpha, x, y) {
+   return accelerate.axpy(alpha, x, y);
+ }
+
+ /**
+ * Vector absolute value: b = |a|
+ * @param {Float64Array} a - Input vector
+ * @param {Float64Array} b - Output vector
+ * @returns {Float64Array} b
+ */
+ function vabs(a, b) {
+   return accelerate.vabs(a, b);
+ }
+
+ /**
+ * Vector square: b = a^2
+ * @param {Float64Array} a - Input vector
+ * @param {Float64Array} b - Output vector
+ * @returns {Float64Array} b
+ */
+ function vsquare(a, b) {
+   return accelerate.vsquare(a, b);
+ }
+
+ /**
+ * Vector square root: b = sqrt(a)
+ * @param {Float64Array} a - Input vector
+ * @param {Float64Array} b - Output vector
+ * @returns {Float64Array} b
+ */
+ function vsqrt(a, b) {
+   return accelerate.vsqrt(a, b);
+ }
+
+ /**
+ * Normalize vector to unit length
+ * @param {Float64Array} a - Input vector
+ * @param {Float64Array} b - Output vector (unit vector)
+ * @returns {Float64Array} b
+ */
+ function normalize(a, b) {
+   return accelerate.normalize(a, b);
+ }
+
+ /**
+ * Euclidean distance between two vectors
+ * @param {Float64Array} a - First vector
+ * @param {Float64Array} b - Second vector
+ * @returns {number} Distance
+ */
+ function euclidean(a, b) {
+   return accelerate.euclidean(a, b);
+ }
+
+ /**
+ * Root Mean Square of vector
+ * @param {Float64Array} a - Input vector
+ * @returns {number} RMS value
+ */
+ function rms(a) {
+   return accelerate.rms(a);
+ }
+
+ module.exports = {
+   // Matrix operations
+   matmul,
+   matmulFloat,
+   matvec,
+
+   // BLAS operations
+   axpy,
+
+   // Vector arithmetic
+   dot,
+   vadd,
+   vsub,
+   vmul,
+   vdiv,
+   vscale,
+
+   // Vector functions
+   vabs,
+   vsquare,
+   vsqrt,
+   normalize,
+
+   // Reductions
+   sum,
+   mean,
+   max,
+   min,
+   rms,
+
+   // Distance metrics
+   euclidean,
+
+   // Signal processing
+   fft,
+
+   // Raw native bindings (for advanced use)
+   _native: accelerate
+ };