@danielsimonjr/mathts-parallel 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +993 -0
- package/dist/index.js +953 -0
- package/package.json +57 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,993 @@
|
|
|
1
|
+
import { TaskOptions, PoolStats as PoolStats$1, MathWorkerPool } from '@danielsimonjr/mathts-workerpool';
|
|
2
|
+
export { Transfer } from '@danielsimonjr/mathts-workerpool';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* MathTS Compute Pool
|
|
6
|
+
*
|
|
7
|
+
* High-level wrapper around @danielsimonjr/mathts-workerpool for parallel computation in MathTS.
|
|
8
|
+
* Provides automatic parallelization of matrix operations based on data size.
|
|
9
|
+
*
|
|
10
|
+
* @packageDocumentation
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
/**
 * Configuration for {@link ComputePool}.
 *
 * Extends the base WorkerPoolConfig of `@danielsimonjr/mathts-workerpool`
 * with MathTS-specific options that control when and how operations are
 * parallelized.
 */
interface ComputePoolConfig {
    /** Enable parallel processing; when false, callers fall back to sequential execution. */
    enabled: boolean;
    /** Minimum number of workers to keep alive in the pool. */
    minWorkers: number;
    /** Maximum number of workers the pool may spawn. */
    maxWorkers: number;
    /** Minimum element count before an operation is parallelized (smaller inputs run sequentially). */
    thresholdElements: number;
    /** Number of elements per chunk when splitting data across workers. */
    chunkSize: number;
    /** Worker backend: 'auto' picks per environment; 'web' = Web Workers; 'thread' = worker_threads. */
    workerType: 'auto' | 'web' | 'thread';
    /** Idle time in milliseconds before a surplus worker is shut down. */
    workerIdleTimeout: number;
    /** Default per-task timeout in milliseconds. */
    taskTimeout: number;
}
/**
 * Default {@link ComputePoolConfig} used when a `ComputePool` is constructed
 * without (or with a partial) configuration.
 */
declare const DEFAULT_POOL_CONFIG: ComputePoolConfig;
|
|
39
|
+
/**
 * Result of a parallel operation.
 *
 * Wraps the computed value together with execution metadata so callers can
 * inspect whether parallelization actually happened and how it performed.
 *
 * @typeParam T - Type of the computed result value.
 */
interface ParallelResult<T> {
    /** The computed result. */
    result: T;
    /** Wall-clock time taken in milliseconds. */
    duration: number;
    /** Number of chunks the input was split into (1 when run sequentially). */
    chunks: number;
    /** Whether the operation was actually distributed across workers. */
    parallelized: boolean;
}
|
|
52
|
+
/**
 * ComputePool for parallel MathTS operations.
 *
 * Wraps the @danielsimonjr/mathts-workerpool MathWorkerPool with a
 * MathTS-specific API. Each operation returns a {@link ParallelResult}
 * carrying the value plus timing/chunking metadata.
 *
 * @example
 * ```typescript
 * import { ComputePool } from '@danielsimonjr/mathts-parallel';
 *
 * const pool = new ComputePool({ maxWorkers: 8 });
 * await pool.initialize();
 *
 * // Parallel matrix multiplication
 * const result = await pool.matmul(matrixA, aRows, aCols, matrixB, bCols);
 *
 * // Parallel element-wise operation
 * const sum = await pool.elementwise(a, b, 'add');
 *
 * // Cleanup
 * await pool.terminate();
 * ```
 */
declare class ComputePool {
    /** Underlying MathWorkerPool instance (created during initialize()). */
    private workerPool;
    /** Effective configuration (defaults merged with constructor overrides). */
    private config;
    /**
     * Create a pool. Options not provided fall back to DEFAULT_POOL_CONFIG.
     * Note: the pool is not usable until {@link initialize} resolves.
     */
    constructor(config?: Partial<ComputePoolConfig>);
    /**
     * Initialize the worker pool (spawns workers). Must be awaited before
     * running any operation.
     */
    initialize(): Promise<void>;
    /**
     * Check if the pool has been initialized and is ready to accept tasks.
     */
    isReady(): boolean;
    /**
     * Determine if an operation over `elementCount` elements should be
     * parallelized under the current config (enabled + threshold).
     */
    shouldParallelize(elementCount: number): boolean;
    /**
     * Execute a named method in the worker pool directly.
     * Low-level escape hatch; prefer the typed operation methods below.
     */
    exec<T>(method: string, params: unknown[], options?: TaskOptions): Promise<T>;
    /**
     * Get pool statistics (worker counts, queue sizes, etc. — see
     * PoolStats in @danielsimonjr/mathts-workerpool).
     */
    stats(): PoolStats$1;
    /**
     * Parallel sum of array elements.
     */
    sum(data: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Parallel dot product of two equal-length vectors.
     */
    dot(a: Float64Array, b: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Parallel element-wise binary operation: c[i] = a[i] <op> b[i].
     */
    elementwise(a: Float64Array, b: Float64Array, op: 'add' | 'subtract' | 'multiply' | 'divide'): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel scalar multiplication: b[i] = data[i] * scalar.
     */
    scale(data: Float64Array, scalar: number): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel matrix multiplication (C = A × B).
     *
     * @param a - First matrix as flat Float64Array (row-major)
     * @param aRows - Number of rows in A
     * @param aCols - Number of columns in A (must equal B's row count)
     * @param b - Second matrix as flat Float64Array (row-major)
     * @param bCols - Number of columns in B
     */
    matmul(a: Float64Array, aRows: number, aCols: number, b: Float64Array, bCols: number): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel matrix transpose; result is cols × rows, row-major.
     */
    transpose(data: Float64Array, rows: number, cols: number): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel map operation. NOTE: `fn` is serialized to workers, so it
     * must not capture closures or external state.
     */
    map<T, R>(data: T[], fn: (item: T) => R): Promise<ParallelResult<R[]>>;
    /**
     * Parallel reduce operation (same closure restriction as map).
     */
    reduce<T, R>(data: T[], fn: (acc: R, item: T) => R, initial: R): Promise<ParallelResult<R>>;
    /**
     * Parallel filter operation (same closure restriction as map).
     */
    filter<T>(data: T[], predicate: (item: T) => boolean): Promise<ParallelResult<T[]>>;
    /**
     * Find min and max values (with their indices) in parallel.
     */
    minMax(data: Float64Array): Promise<ParallelResult<{
        min: number;
        max: number;
        minIdx: number;
        maxIdx: number;
    }>>;
    /**
     * Compute mean, variance, and standard deviation in parallel.
     */
    variance(data: Float64Array): Promise<ParallelResult<{
        mean: number;
        variance: number;
        std: number;
    }>>;
    /**
     * Compute Euclidean (L2) norm in parallel.
     */
    norm(data: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Compute Euclidean distance between two vectors in parallel.
     */
    distance(a: Float64Array, b: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Compute histogram bin counts in parallel.
     * min/max bounds are auto-detected when omitted.
     */
    histogram(data: Float64Array, bins: number, min?: number, max?: number): Promise<ParallelResult<number[]>>;
    /**
     * Apply a named unary math function element-wise in parallel.
     */
    unary(data: Float64Array, fn: 'abs' | 'sqrt' | 'exp' | 'log' | 'sin' | 'cos' | 'tan' | 'negate' | 'square'): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel absolute value (b[i] = |a[i]|).
     */
    abs(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel square root (b[i] = √a[i]).
     */
    sqrt(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel exponential (b[i] = e^a[i]).
     */
    exp(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel natural log (b[i] = ln(a[i])).
     */
    log(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel sine (element-wise).
     */
    sin(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel cosine (element-wise).
     */
    cos(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel tangent (element-wise).
     */
    tan(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel negation (b[i] = -a[i]).
     */
    negate(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel square (b[i] = a[i]²).
     */
    square(data: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel matrix-vector multiplication (y = A × x).
     */
    matvec(matrix: Float64Array, rows: number, cols: number, vector: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel outer product; result matrix is a.length × b.length.
     */
    outer(a: Float64Array, b: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel find: first element matching the predicate.
     * `value`/`index` are present only when `found` is true.
     */
    find<T>(data: T[], predicate: (item: T) => boolean): Promise<ParallelResult<{
        found: boolean;
        value?: T;
        index?: number;
    }>>;
    /**
     * Parallel sort operation; default order is ascending when `compare`
     * is omitted.
     */
    sort<T>(data: T[], compare?: (a: T, b: T) => number): Promise<ParallelResult<T[]>>;
    /**
     * Parallel addition of two arrays (c[i] = a[i] + b[i]).
     */
    add(a: Float64Array, b: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel subtraction of two arrays (c[i] = a[i] - b[i]).
     */
    subtract(a: Float64Array, b: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel element-wise multiplication (c[i] = a[i] * b[i]).
     */
    multiply(a: Float64Array, b: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Parallel element-wise division (c[i] = a[i] / b[i]).
     */
    divide(a: Float64Array, b: Float64Array): Promise<ParallelResult<Float64Array>>;
    /**
     * Compute mean in parallel (uses variance internally).
     */
    mean(data: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Compute standard deviation in parallel.
     */
    std(data: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Find minimum value in parallel.
     */
    min(data: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Find maximum value in parallel.
     */
    max(data: Float64Array): Promise<ParallelResult<number>>;
    /**
     * Terminate the worker pool. Pass `force` to kill workers without
     * waiting for in-flight tasks to finish.
     */
    terminate(force?: boolean): Promise<void>;
    /**
     * Update configuration in place (merged over current config).
     */
    updateConfig(config: Partial<ComputePoolConfig>): void;
    /**
     * Get a snapshot of the current configuration.
     */
    getConfig(): ComputePoolConfig;
    /**
     * Get the underlying MathWorkerPool for advanced operations.
     */
    getWorkerPool(): MathWorkerPool;
}
/**
 * Global shared compute pool instance, used by the module-level
 * `parallel*` helper functions when no custom pool is supplied.
 */
declare const computePool: ComputePool;
|
|
282
|
+
|
|
283
|
+
/**
|
|
284
|
+
* Parallel Matrix Multiplication
|
|
285
|
+
*
|
|
286
|
+
* High-performance parallel matrix multiplication with automatic chunking.
|
|
287
|
+
* Distributes rows across workers for parallel computation.
|
|
288
|
+
*
|
|
289
|
+
* @packageDocumentation
|
|
290
|
+
*/
|
|
291
|
+
|
|
292
|
+
/**
 * Options for parallel matrix operations.
 * `forceParallel` and `forceSequential` are mutually exclusive overrides of
 * the pool's element-count threshold.
 */
interface MatmulOptions {
    /** Custom pool to use (defaults to the global computePool). */
    pool?: ComputePool;
    /** Force parallel execution regardless of threshold. */
    forceParallel?: boolean;
    /** Force sequential execution regardless of threshold. */
    forceSequential?: boolean;
}
/**
 * Parallel matrix multiplication (C = A × B).
 *
 * Multiplies two matrices using parallel workers. The computation
 * distributes rows of matrix A across available workers.
 *
 * @param a - First matrix as flat Float64Array (row-major order)
 * @param aRows - Number of rows in matrix A
 * @param aCols - Number of columns in matrix A (must equal B's row count)
 * @param b - Second matrix as flat Float64Array (row-major order)
 * @param bCols - Number of columns in matrix B
 * @param options - Optional configuration
 * @returns Result matrix C (aRows × bCols) with parallel execution metadata
 *
 * @example
 * ```typescript
 * // 2x3 matrix * 3x2 matrix = 2x2 matrix
 * const A = new Float64Array([1, 2, 3, 4, 5, 6]);
 * const B = new Float64Array([7, 8, 9, 10, 11, 12]);
 *
 * const result = await parallelMatmul(A, 2, 3, B, 2);
 * console.log(result.result); // 2x2 result matrix
 * console.log(result.parallelized); // true if workers were used
 * ```
 */
declare function parallelMatmul(a: Float64Array, aRows: number, aCols: number, b: Float64Array, bCols: number, options?: MatmulOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel matrix-vector multiplication (y = A × x).
 *
 * Multiplies a matrix by a vector using parallel workers.
 *
 * @param matrix - Matrix as flat Float64Array (row-major order)
 * @param rows - Number of rows in the matrix
 * @param cols - Number of columns in the matrix (must equal vector length)
 * @param vector - Vector as Float64Array
 * @param options - Optional configuration
 * @returns Result vector (length `rows`) with parallel execution metadata
 */
declare function parallelMatvec(matrix: Float64Array, rows: number, cols: number, vector: Float64Array, options?: MatmulOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel matrix transpose.
 *
 * Transposes a matrix using parallel workers.
 *
 * @param data - Matrix as flat Float64Array (row-major order)
 * @param rows - Number of rows in the original matrix
 * @param cols - Number of columns in the original matrix
 * @param options - Optional configuration
 * @returns Transposed matrix (cols × rows) with parallel execution metadata
 */
declare function parallelTranspose(data: Float64Array, rows: number, cols: number, options?: MatmulOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel outer product (C = a ⊗ b).
 *
 * Computes the outer product of two vectors.
 *
 * @param a - First vector
 * @param b - Second vector
 * @param options - Optional configuration
 * @returns Outer product matrix (a.length × b.length), row-major
 */
declare function parallelOuter(a: Float64Array, b: Float64Array, options?: MatmulOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel dot product (scalar = a · b).
 *
 * Computes the dot product of two vectors.
 *
 * @param a - First vector
 * @param b - Second vector (must have same length as a)
 * @param options - Optional configuration
 * @returns Dot product scalar
 */
declare function parallelDot(a: Float64Array, b: Float64Array, options?: MatmulOptions): Promise<ParallelResult<number>>;
|
|
376
|
+
|
|
377
|
+
/**
|
|
378
|
+
* Parallel Element-wise Operations
|
|
379
|
+
*
|
|
380
|
+
* High-performance parallel element-wise operations on arrays.
|
|
381
|
+
* Automatically chunks data for distribution across workers.
|
|
382
|
+
*
|
|
383
|
+
* @packageDocumentation
|
|
384
|
+
*/
|
|
385
|
+
|
|
386
|
+
/**
 * Options for parallel element-wise operations.
 * `forceParallel` and `forceSequential` override the pool's threshold.
 */
interface ElementwiseOptions {
    /** Custom pool to use (defaults to the global computePool). */
    pool?: ComputePool;
    /** Force parallel execution regardless of threshold. */
    forceParallel?: boolean;
    /** Force sequential execution regardless of threshold. */
    forceSequential?: boolean;
}
/**
 * Parallel element-wise addition (c[i] = a[i] + b[i]).
 */
declare function parallelAdd(a: Float64Array, b: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel element-wise subtraction (c[i] = a[i] - b[i]).
 */
declare function parallelSubtract(a: Float64Array, b: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel element-wise multiplication (c[i] = a[i] * b[i]).
 */
declare function parallelMultiply(a: Float64Array, b: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel element-wise division (c[i] = a[i] / b[i]).
 */
declare function parallelDivide(a: Float64Array, b: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel scalar multiplication (b[i] = a[i] * scalar).
 */
declare function parallelScale(data: Float64Array, scalar: number, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel absolute value (b[i] = |a[i]|).
 */
declare function parallelAbs(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel negation (b[i] = -a[i]).
 */
declare function parallelNegate(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel square (b[i] = a[i]²).
 */
declare function parallelSquare(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel square root (b[i] = √a[i]).
 */
declare function parallelSqrt(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel exponential (b[i] = e^a[i]).
 */
declare function parallelExp(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel natural logarithm (b[i] = ln(a[i])).
 */
declare function parallelLog(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel sine (b[i] = sin(a[i])).
 */
declare function parallelSin(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel cosine (b[i] = cos(a[i])).
 */
declare function parallelCos(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Parallel tangent (b[i] = tan(a[i])).
 */
declare function parallelTan(data: Float64Array, options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Generic parallel element-wise binary operation selected by name.
 */
declare function parallelElementwise(a: Float64Array, b: Float64Array, op: 'add' | 'subtract' | 'multiply' | 'divide', options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
/**
 * Generic parallel unary operation selected by name.
 */
declare function parallelUnary(data: Float64Array, fn: 'abs' | 'sqrt' | 'exp' | 'log' | 'sin' | 'cos' | 'tan' | 'negate' | 'square', options?: ElementwiseOptions): Promise<ParallelResult<Float64Array>>;
|
|
461
|
+
|
|
462
|
+
/**
|
|
463
|
+
* Parallel Reduction Operations
|
|
464
|
+
*
|
|
465
|
+
* High-performance parallel reduction operations (sum, min, max, etc.).
|
|
466
|
+
* Uses parallel aggregation with sequential final reduction.
|
|
467
|
+
*
|
|
468
|
+
* @packageDocumentation
|
|
469
|
+
*/
|
|
470
|
+
|
|
471
|
+
/**
 * Options for parallel reduction operations.
 * `forceParallel` and `forceSequential` override the pool's threshold.
 */
interface ReduceOptions {
    /** Custom pool to use (defaults to the global computePool). */
    pool?: ComputePool;
    /** Force parallel execution regardless of threshold. */
    forceParallel?: boolean;
    /** Force sequential execution regardless of threshold. */
    forceSequential?: boolean;
}
/**
 * Parallel sum reduction.
 *
 * Computes the sum of all elements using parallel partial sums.
 *
 * @param data - Array to sum
 * @param options - Optional configuration
 * @returns Sum of all elements
 */
declare function parallelSum(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel mean computation.
 *
 * @param data - Array to compute mean of
 * @param options - Optional configuration
 * @returns Mean value
 */
declare function parallelMean(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel minimum value.
 *
 * @param data - Array to find minimum of
 * @param options - Optional configuration
 * @returns Minimum value
 */
declare function parallelMin(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel maximum value.
 *
 * @param data - Array to find maximum of
 * @param options - Optional configuration
 * @returns Maximum value
 */
declare function parallelMax(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel min/max with indices.
 *
 * @param data - Array to analyze
 * @param options - Optional configuration
 * @returns Object with min, max, minIdx, maxIdx
 */
declare function parallelMinMax(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<{
    min: number;
    max: number;
    minIdx: number;
    maxIdx: number;
}>>;
/**
 * Parallel variance computation.
 *
 * Uses Welford's algorithm for numerically stable variance calculation.
 *
 * @param data - Array to compute variance of
 * @param options - Optional configuration
 * @returns Object with mean, variance, and std (standard deviation)
 */
declare function parallelVariance(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<{
    mean: number;
    variance: number;
    std: number;
}>>;
/**
 * Parallel standard deviation.
 *
 * @param data - Array to compute std of
 * @param options - Optional configuration
 * @returns Standard deviation
 */
declare function parallelStd(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel Euclidean norm (L2 norm).
 *
 * Computes √(Σx²) using parallel partial sums of squares.
 *
 * @param data - Vector to compute norm of
 * @param options - Optional configuration
 * @returns Euclidean norm
 */
declare function parallelNorm(data: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel Euclidean distance.
 *
 * Computes the Euclidean distance between two vectors.
 *
 * @param a - First vector
 * @param b - Second vector
 * @param options - Optional configuration
 * @returns Euclidean distance
 */
declare function parallelDistance(a: Float64Array, b: Float64Array, options?: ReduceOptions): Promise<ParallelResult<number>>;
/**
 * Parallel histogram computation.
 *
 * Bins data into a histogram using parallel counting.
 *
 * @param data - Array to bin
 * @param bins - Number of bins
 * @param min - Minimum value (auto-detected if not provided)
 * @param max - Maximum value (auto-detected if not provided)
 * @param options - Optional configuration
 * @returns Array of bin counts (length `bins`)
 */
declare function parallelHistogram(data: Float64Array, bins: number, min?: number, max?: number, options?: ReduceOptions): Promise<ParallelResult<number[]>>;
/**
 * Generic parallel reduce operation.
 *
 * Reduces an array using a custom function. For truly associative
 * operations, prefer the specialized functions (parallelSum,
 * parallelMin, etc.) for better performance. The function is serialized
 * to workers, so it must not capture closures or external state.
 *
 * @param data - Array to reduce
 * @param fn - Reduction function (acc, item) => newAcc
 * @param initial - Initial accumulator value
 * @param options - Optional configuration
 * @returns Reduced value
 */
declare function parallelReduce<T, R>(data: T[], fn: (acc: R, item: T) => R, initial: R, options?: ReduceOptions): Promise<ParallelResult<R>>;
|
|
599
|
+
|
|
600
|
+
/**
|
|
601
|
+
* Parallel Map and Transform Operations
|
|
602
|
+
*
|
|
603
|
+
* High-performance parallel map, filter, find, and sort operations.
|
|
604
|
+
* Automatically chunks data for distribution across workers.
|
|
605
|
+
*
|
|
606
|
+
* @packageDocumentation
|
|
607
|
+
*/
|
|
608
|
+
|
|
609
|
+
/**
 * Options for parallel map/transform operations.
 * `forceParallel` and `forceSequential` override the pool's threshold.
 */
interface MapOptions {
    /** Custom pool to use (defaults to the global computePool). */
    pool?: ComputePool;
    /** Force parallel execution regardless of threshold. */
    forceParallel?: boolean;
    /** Force sequential execution regardless of threshold. */
    forceSequential?: boolean;
}
/**
 * Parallel map operation.
 *
 * Applies a function to each element in parallel. The function is
 * serialized to workers, so it must not capture closures or external state.
 *
 * @param data - Array to map over
 * @param fn - Function to apply to each element
 * @param options - Optional configuration
 * @returns Mapped array (same order as input)
 *
 * @example
 * ```typescript
 * const data = [1, 2, 3, 4, 5];
 * const result = await parallelMap(data, x => x * x);
 * console.log(result.result); // [1, 4, 9, 16, 25]
 * ```
 */
declare function parallelMap<T, R>(data: T[], fn: (item: T) => R, options?: MapOptions): Promise<ParallelResult<R[]>>;
/**
 * Parallel filter operation.
 *
 * Filters elements based on a predicate in parallel.
 *
 * @param data - Array to filter
 * @param predicate - Function that returns true for elements to keep
 * @param options - Optional configuration
 * @returns Filtered array (original relative order preserved)
 *
 * @example
 * ```typescript
 * const data = [1, 2, 3, 4, 5, 6];
 * const result = await parallelFilter(data, x => x % 2 === 0);
 * console.log(result.result); // [2, 4, 6]
 * ```
 */
declare function parallelFilter<T>(data: T[], predicate: (item: T) => boolean, options?: MapOptions): Promise<ParallelResult<T[]>>;
/**
 * Parallel find operation.
 *
 * Finds the first element matching a predicate.
 * Searches chunks in parallel for early termination.
 *
 * @param data - Array to search
 * @param predicate - Function that returns true for the target element
 * @param options - Optional configuration
 * @returns Object with found status, value, and index (value/index only when found)
 *
 * @example
 * ```typescript
 * const data = [1, 2, 3, 4, 5];
 * const result = await parallelFind(data, x => x > 3);
 * console.log(result.result); // { found: true, value: 4, index: 3 }
 * ```
 */
declare function parallelFind<T>(data: T[], predicate: (item: T) => boolean, options?: MapOptions): Promise<ParallelResult<{
    found: boolean;
    value?: T;
    index?: number;
}>>;
/**
 * Parallel sort operation.
 *
 * Sorts an array using parallel merge sort.
 * Each chunk is sorted in parallel, then merged.
 *
 * @param data - Array to sort
 * @param compare - Optional comparison function (default: ascending)
 * @param options - Optional configuration
 * @returns Sorted array
 *
 * @example
 * ```typescript
 * const data = [3, 1, 4, 1, 5, 9, 2, 6];
 * const result = await parallelSort(data);
 * console.log(result.result); // [1, 1, 2, 3, 4, 5, 6, 9]
 * ```
 */
declare function parallelSort<T>(data: T[], compare?: (a: T, b: T) => number, options?: MapOptions): Promise<ParallelResult<T[]>>;
/**
 * Parallel forEach (for side effects).
 *
 * Note: This executes the function for side effects only.
 * The function is serialized to workers, so it cannot access
 * closures or external state — side effects occur inside the worker,
 * not in the calling thread.
 *
 * @param data - Array to iterate
 * @param fn - Function to apply to each element
 * @param options - Optional configuration
 */
declare function parallelForEach<T>(data: T[], fn: (item: T) => void, options?: MapOptions): Promise<ParallelResult<void[]>>;
/**
 * Parallel some operation.
 *
 * Tests whether at least one element passes the predicate.
 *
 * @param data - Array to test
 * @param predicate - Test function
 * @param options - Optional configuration
 * @returns true if any element passes
 */
declare function parallelSome<T>(data: T[], predicate: (item: T) => boolean, options?: MapOptions): Promise<ParallelResult<boolean>>;
/**
 * Parallel every operation.
 *
 * Tests whether all elements pass the predicate.
 *
 * @param data - Array to test
 * @param predicate - Test function
 * @param options - Optional configuration
 * @returns true if all elements pass
 */
declare function parallelEvery<T>(data: T[], predicate: (item: T) => boolean, options?: MapOptions): Promise<ParallelResult<boolean>>;
/**
 * Parallel count operation.
 *
 * Counts elements matching a predicate.
 *
 * @param data - Array to count
 * @param predicate - Optional predicate (counts all elements if not provided)
 * @param options - Optional configuration
 * @returns Count of matching elements
 */
declare function parallelCount<T>(data: T[], predicate?: (item: T) => boolean, options?: MapOptions): Promise<ParallelResult<number>>;
|
|
743
|
+
|
|
744
|
+
/**
|
|
745
|
+
* Chunking Strategies for Parallel Operations
|
|
746
|
+
*
|
|
747
|
+
* Provides utilities for dividing large arrays into chunks
|
|
748
|
+
* for parallel processing across multiple workers.
|
|
749
|
+
*
|
|
750
|
+
* @packageDocumentation
|
|
751
|
+
*/
|
|
752
|
+
/**
 * Result of a chunking operation.
 *
 * @typeParam T - Type of each chunk (e.g. Float64Array).
 */
interface ChunkResult<T> {
    /** The chunked data, in original order. */
    chunks: T[];
    /** Metadata about each chunk (parallel to `chunks`). */
    chunkInfo: ChunkInfo[];
    /** Total number of elements across all chunks. */
    totalElements: number;
    /** Number of workers/chunks produced. */
    numChunks: number;
}
/**
 * Information about a single chunk's position in the original array.
 */
interface ChunkInfo {
    /** Start index in the original array (inclusive). */
    startIndex: number;
    /** End index in the original array (exclusive). */
    endIndex: number;
    /** Length of this chunk (endIndex - startIndex). */
    length: number;
    /** Zero-based chunk index. */
    chunkIndex: number;
}
/**
 * Options for chunking operations.
 */
interface ChunkOptions {
    /** Minimum elements per chunk (default: 1000). */
    minChunkSize?: number;
    /** Maximum number of chunks (default: number of CPUs). */
    maxChunks?: number;
    /** Target chunk size; auto-calculated when omitted. */
    targetChunkSize?: number;
    /** Whether to balance chunk sizes evenly (default: true). */
    balanced?: boolean;
}
/**
 * Calculate the optimal number of chunks for a given array size,
 * honoring the min-chunk-size and max-chunks constraints.
 *
 * @param totalElements - Total number of elements
 * @param options - Chunking options
 * @returns Optimal number of chunks
 */
declare function calculateOptimalChunks(totalElements: number, options?: ChunkOptions): number;
/**
 * Chunk a Float64Array into multiple smaller arrays for distribution
 * across workers.
 *
 * @param data - The array to chunk
 * @param options - Chunking options
 * @returns Chunked data with per-chunk metadata
 */
declare function chunkFloat64Array(data: Float64Array, options?: ChunkOptions): ChunkResult<Float64Array>;
|
|
807
|
+
/**
|
|
808
|
+
* Chunk a generic array into multiple smaller arrays
|
|
809
|
+
*
|
|
810
|
+
* @param data - The array to chunk
|
|
811
|
+
* @param options - Chunking options
|
|
812
|
+
* @returns Chunked data with metadata
|
|
813
|
+
*/
|
|
814
|
+
declare function chunkArray<T>(data: T[], options?: ChunkOptions): ChunkResult<T[]>;
|
|
815
|
+
/**
|
|
816
|
+
* Merge chunked results back into a single array
|
|
817
|
+
*
|
|
818
|
+
* @param chunks - Array of chunk results
|
|
819
|
+
* @returns Merged array
|
|
820
|
+
*/
|
|
821
|
+
declare function mergeFloat64Chunks(chunks: Float64Array[]): Float64Array;
|
|
822
|
+
/**
|
|
823
|
+
* Merge generic array chunks
|
|
824
|
+
*
|
|
825
|
+
* @param chunks - Array of chunk results
|
|
826
|
+
* @returns Merged array
|
|
827
|
+
*/
|
|
828
|
+
declare function mergeArrayChunks<T>(chunks: T[][]): T[];
|
|
829
|
+
/**
|
|
830
|
+
* Determine if an array should be parallelized based on size
|
|
831
|
+
*
|
|
832
|
+
* @param elementCount - Number of elements
|
|
833
|
+
* @param threshold - Minimum elements for parallelization (default: 10000)
|
|
834
|
+
* @returns Whether parallelization is recommended
|
|
835
|
+
*/
|
|
836
|
+
declare function shouldParallelize$1(elementCount: number, threshold?: number): boolean;
|
|
837
|
+
/**
|
|
838
|
+
* Create a range partitioner for parallel iteration
|
|
839
|
+
*
|
|
840
|
+
* @param start - Start of range (inclusive)
|
|
841
|
+
* @param end - End of range (exclusive)
|
|
842
|
+
* @param options - Chunking options
|
|
843
|
+
* @returns Array of [start, end] pairs for each chunk
|
|
844
|
+
*/
|
|
845
|
+
declare function partitionRange(start: number, end: number, options?: ChunkOptions): Array<[number, number]>;
|
|
846
|
+
/**
|
|
847
|
+
* Create a 2D partition for matrix operations
|
|
848
|
+
*
|
|
849
|
+
* @param rows - Number of rows
|
|
850
|
+
* @param cols - Number of columns
|
|
851
|
+
* @param options - Chunking options
|
|
852
|
+
* @returns Array of [rowStart, rowEnd, colStart, colEnd] tuples
|
|
853
|
+
*/
|
|
854
|
+
declare function partition2D(rows: number, cols: number, options?: ChunkOptions): Array<[number, number, number, number]>;
|
|
855
|
+
|
|
856
|
+
/**
|
|
857
|
+
* Threshold-based Dispatch Strategy
|
|
858
|
+
*
|
|
859
|
+
* Automatically decides whether to use parallel or sequential execution
|
|
860
|
+
* based on data size, operation type, and system capabilities.
|
|
861
|
+
*
|
|
862
|
+
* @packageDocumentation
|
|
863
|
+
*/
|
|
864
|
+
|
|
865
|
+
/**
|
|
866
|
+
* Operation categories for threshold selection
|
|
867
|
+
*/
|
|
868
|
+
type OperationCategory = 'matmul' | 'elementwise' | 'reduce' | 'map' | 'sort' | 'decomposition' | 'general';
|
|
869
|
+
/**
|
|
870
|
+
* Threshold configuration for different operation types
|
|
871
|
+
*/
|
|
872
|
+
interface ThresholdConfig {
|
|
873
|
+
/** Minimum elements for parallel matmul */
|
|
874
|
+
matmul: number;
|
|
875
|
+
/** Minimum elements for parallel elementwise ops */
|
|
876
|
+
elementwise: number;
|
|
877
|
+
/** Minimum elements for parallel reductions */
|
|
878
|
+
reduce: number;
|
|
879
|
+
/** Minimum elements for parallel map */
|
|
880
|
+
map: number;
|
|
881
|
+
/** Minimum elements for parallel sort */
|
|
882
|
+
sort: number;
|
|
883
|
+
/** Minimum elements for parallel decomposition */
|
|
884
|
+
decomposition: number;
|
|
885
|
+
/** Default threshold for other operations */
|
|
886
|
+
general: number;
|
|
887
|
+
}
|
|
888
|
+
/**
|
|
889
|
+
* Default thresholds optimized for typical hardware
|
|
890
|
+
*
|
|
891
|
+
* These thresholds account for worker creation overhead vs computation benefit.
|
|
892
|
+
* Smaller thresholds for compute-intensive operations (matmul, decomposition).
|
|
893
|
+
* Larger thresholds for memory-bound operations (elementwise, reduce).
|
|
894
|
+
*/
|
|
895
|
+
declare const DEFAULT_THRESHOLDS: ThresholdConfig;
|
|
896
|
+
/**
|
|
897
|
+
* Execution mode returned by threshold dispatch
|
|
898
|
+
*/
|
|
899
|
+
type ExecutionMode = 'parallel' | 'sequential';
|
|
900
|
+
/**
|
|
901
|
+
* Result of threshold dispatch decision
|
|
902
|
+
*/
|
|
903
|
+
interface DispatchResult {
|
|
904
|
+
/** Recommended execution mode */
|
|
905
|
+
mode: ExecutionMode;
|
|
906
|
+
/** Reason for the decision */
|
|
907
|
+
reason: string;
|
|
908
|
+
/** Threshold used for comparison */
|
|
909
|
+
threshold: number;
|
|
910
|
+
/** Actual element count */
|
|
911
|
+
elementCount: number;
|
|
912
|
+
}
|
|
913
|
+
/**
|
|
914
|
+
* Threshold-based Dispatch Manager
|
|
915
|
+
*
|
|
916
|
+
* Provides intelligent parallel vs sequential dispatch based on
|
|
917
|
+
* operation type, data size, and system state.
|
|
918
|
+
*/
|
|
919
|
+
declare class ThresholdDispatcher {
|
|
920
|
+
private thresholds;
|
|
921
|
+
private pool;
|
|
922
|
+
constructor(thresholds?: Partial<ThresholdConfig>, pool?: ComputePool);
|
|
923
|
+
/**
|
|
924
|
+
* Get threshold for a specific operation category
|
|
925
|
+
*/
|
|
926
|
+
getThreshold(category: OperationCategory): number;
|
|
927
|
+
/**
|
|
928
|
+
* Update thresholds
|
|
929
|
+
*/
|
|
930
|
+
setThresholds(thresholds: Partial<ThresholdConfig>): void;
|
|
931
|
+
/**
|
|
932
|
+
* Get current threshold configuration
|
|
933
|
+
*/
|
|
934
|
+
getThresholds(): ThresholdConfig;
|
|
935
|
+
/**
|
|
936
|
+
* Determine execution mode based on operation and data size
|
|
937
|
+
*/
|
|
938
|
+
dispatch(elementCount: number, category?: OperationCategory): DispatchResult;
|
|
939
|
+
/**
|
|
940
|
+
* Simple boolean check for parallel execution
|
|
941
|
+
*/
|
|
942
|
+
shouldParallelize(elementCount: number, category?: OperationCategory): boolean;
|
|
943
|
+
/**
|
|
944
|
+
* Calculate optimal chunk count based on operation and data size
|
|
945
|
+
*/
|
|
946
|
+
calculateChunks(elementCount: number, category?: OperationCategory): number;
|
|
947
|
+
}
|
|
948
|
+
/**
|
|
949
|
+
* Global threshold dispatcher instance
|
|
950
|
+
*/
|
|
951
|
+
declare const thresholdDispatcher: ThresholdDispatcher;
|
|
952
|
+
/**
|
|
953
|
+
* Convenience function to check if operation should be parallelized
|
|
954
|
+
*/
|
|
955
|
+
declare function shouldParallelize(elementCount: number, category?: OperationCategory): boolean;
|
|
956
|
+
/**
|
|
957
|
+
* Convenience function to get dispatch decision
|
|
958
|
+
*/
|
|
959
|
+
declare function dispatch(elementCount: number, category?: OperationCategory): DispatchResult;
|
|
960
|
+
/**
|
|
961
|
+
* Convenience function to calculate optimal chunk count
|
|
962
|
+
*/
|
|
963
|
+
declare function calculateChunks(elementCount: number, category?: OperationCategory): number;
|
|
964
|
+
|
|
965
|
+
/**
 * @danielsimonjr/mathts-parallel
 *
 * WebWorker parallelization for MathTS computations.
 * Provides automatic parallelization based on data size thresholds.
 *
 * @packageDocumentation
 */

interface PoolOptions {
|
|
975
|
+
minWorkers?: number | 'max';
|
|
976
|
+
maxWorkers?: number;
|
|
977
|
+
workerType?: 'auto' | 'web' | 'thread';
|
|
978
|
+
workerTerminateTimeout?: number;
|
|
979
|
+
}
|
|
980
|
+
interface ExecOptions {
|
|
981
|
+
on?: (payload: unknown) => void;
|
|
982
|
+
transfer?: unknown[];
|
|
983
|
+
timeout?: number;
|
|
984
|
+
}
|
|
985
|
+
interface PoolStats {
|
|
986
|
+
totalWorkers: number;
|
|
987
|
+
busyWorkers: number;
|
|
988
|
+
idleWorkers: number;
|
|
989
|
+
pendingTasks: number;
|
|
990
|
+
activeTasks: number;
|
|
991
|
+
}
|
|
992
|
+
|
|
993
|
+
// Public API surface of @danielsimonjr/mathts-parallel: re-exports every
// type and function declared above (type-only exports use the `type` modifier).
export { type ChunkInfo, type ChunkOptions, type ChunkResult, ComputePool, type ComputePoolConfig, DEFAULT_POOL_CONFIG, DEFAULT_THRESHOLDS, type DispatchResult, type ElementwiseOptions, type ExecOptions, type ExecutionMode, type MapOptions, type MatmulOptions, type OperationCategory, type ParallelResult, type PoolOptions, type PoolStats, type ReduceOptions, type ThresholdConfig, ThresholdDispatcher, calculateChunks, calculateOptimalChunks, chunkArray, chunkFloat64Array, computePool, dispatch, mergeArrayChunks, mergeFloat64Chunks, parallelAbs, parallelAdd, parallelCos, parallelCount, parallelDistance, parallelDivide, parallelDot, parallelElementwise, parallelEvery, parallelExp, parallelFilter, parallelFind, parallelForEach, parallelHistogram, parallelLog, parallelMap, parallelMatmul, parallelMatvec, parallelMax, parallelMean, parallelMin, parallelMinMax, parallelMultiply, parallelNegate, parallelNorm, parallelOuter, parallelReduce, parallelScale, parallelSin, parallelSome, parallelSort, parallelSqrt, parallelSquare, parallelStd, parallelSubtract, parallelSum, parallelTan, parallelTranspose, parallelUnary, parallelVariance, partition2D, partitionRange, shouldParallelize$1 as shouldChunkParallelize, shouldParallelize, thresholdDispatcher };
|