@danielsimonjr/mathts-matrix 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2837 @@
1
+ import * as _danielsimonjr_mathts_parallel from '@danielsimonjr/mathts-parallel';
2
+ import { ComputePool, ComputePoolConfig } from '@danielsimonjr/mathts-parallel';
3
+ import * as typed_function from 'typed-function';
4
+
5
+ /**
6
+ * Matrix Base Class
7
+ *
8
+ * Abstract base class defining the common interface for all matrix types.
9
+ * Provides a unified API for DenseMatrix, SparseMatrix, and future implementations.
10
+ *
11
+ * @packageDocumentation
12
+ */
13
+ /**
14
+ * Matrix dimension type
15
+ */
16
+ interface MatrixDimensions {
17
+ rows: number;
18
+ cols: number;
19
+ }
20
+ /**
21
+ * Matrix index for element access
22
+ */
23
+ interface MatrixIndex {
24
+ row: number;
25
+ col: number;
26
+ }
27
+ /**
28
+ * Slice specification for submatrix extraction
29
+ */
30
+ interface SliceSpec {
31
+ rowStart?: number;
32
+ rowEnd?: number;
33
+ colStart?: number;
34
+ colEnd?: number;
35
+ }
36
+ /**
37
+ * Matrix iterator result
38
+ */
39
+ interface MatrixEntry<T = number> {
40
+ row: number;
41
+ col: number;
42
+ value: T;
43
+ }
44
+ /**
45
+ * Matrix storage format identifier
46
+ */
47
+ type MatrixType = 'DenseMatrix' | 'SparseMatrix' | 'DiagonalMatrix' | 'BandMatrix';
48
+ /**
49
+ * Abstract base class for all matrix types
50
+ *
51
+ * @typeParam T - The element type (default: number)
52
+ */
53
+ declare abstract class Matrix<T = number> {
54
+ /**
55
+ * The matrix storage format type
56
+ */
57
+ abstract readonly type: MatrixType;
58
+ /**
59
+ * Number of rows
60
+ */
61
+ abstract readonly rows: number;
62
+ /**
63
+ * Number of columns
64
+ */
65
+ abstract readonly cols: number;
66
+ /**
67
+ * Get the dimensions of the matrix
68
+ */
69
+ get size(): MatrixDimensions;
70
+ /**
71
+ * Check if the matrix is square
72
+ */
73
+ get isSquare(): boolean;
74
+ /**
75
+ * Check if the matrix is a vector (single row or column)
76
+ */
77
+ get isVector(): boolean;
78
+ /**
79
+ * Check if the matrix is a row vector
80
+ */
81
+ get isRowVector(): boolean;
82
+ /**
83
+ * Check if the matrix is a column vector
84
+ */
85
+ get isColumnVector(): boolean;
86
+ /**
87
+ * Total number of elements
88
+ */
89
+ get length(): number;
90
+ /**
91
+ * Get element at position (row, col)
92
+ * @param row - Row index (0-based)
93
+ * @param col - Column index (0-based)
94
+ */
95
+ abstract get(row: number, col: number): T;
96
+ /**
97
+ * Set element at position (row, col)
98
+ * @param row - Row index (0-based)
99
+ * @param col - Column index (0-based)
100
+ * @param value - Value to set
101
+ * @returns A new matrix with the updated value (immutable)
102
+ */
103
+ abstract set(row: number, col: number, value: T): Matrix<T>;
104
+ /**
105
+ * Get a row as a new matrix (1×n)
106
+ * @param index - Row index
107
+ */
108
+ abstract row(index: number): Matrix<T>;
109
+ /**
110
+ * Get a column as a new matrix (m×1)
111
+ * @param index - Column index
112
+ */
113
+ abstract column(index: number): Matrix<T>;
114
+ /**
115
+ * Get a submatrix (view or copy depending on implementation)
116
+ * @param spec - Slice specification
117
+ */
118
+ abstract slice(spec: SliceSpec): Matrix<T>;
119
+ /**
120
+ * Get the diagonal elements as a vector
121
+ * @param k - Diagonal offset (0 = main diagonal, positive = above, negative = below)
122
+ */
123
+ abstract diagonal(k?: number): Matrix<T>;
124
+ /**
125
+ * Matrix addition
126
+ */
127
+ abstract add(other: Matrix<T>): Matrix<T>;
128
+ /**
129
+ * Matrix subtraction
130
+ */
131
+ abstract subtract(other: Matrix<T>): Matrix<T>;
132
+ /**
133
+ * Element-wise multiplication (Hadamard product)
134
+ */
135
+ abstract multiplyElementwise(other: Matrix<T>): Matrix<T>;
136
+ /**
137
+ * Matrix multiplication
138
+ */
139
+ abstract multiply(other: Matrix<T>): Matrix<T>;
140
+ /**
141
+ * Scalar multiplication
142
+ */
143
+ abstract scale(scalar: T): Matrix<T>;
144
+ /**
145
+ * Matrix transpose
146
+ */
147
+ abstract transpose(): Matrix<T>;
148
+ /**
149
+ * Convert to nested array representation
150
+ */
151
+ abstract toArray(): T[][];
152
+ /**
153
+ * Convert to flat array (row-major order)
154
+ */
155
+ abstract toFlatArray(): T[];
156
+ /**
157
+ * Create a deep copy of the matrix
158
+ */
159
+ abstract clone(): Matrix<T>;
160
+ /**
161
+ * Iterate over all elements with their indices
162
+ */
163
+ abstract entries(): IterableIterator<MatrixEntry<T>>;
164
+ /**
165
+ * Iterate over all values (row-major order)
166
+ */
167
+ abstract values(): IterableIterator<T>;
168
+ /**
169
+ * Check if indices are within bounds
170
+ */
171
+ protected checkBounds(row: number, col: number): void;
172
+ /**
173
+ * Check if dimensions match for element-wise operations
174
+ */
175
+ protected checkDimensionsMatch(other: Matrix<T>): void;
176
+ /**
177
+ * Check if matrices can be multiplied
178
+ */
179
+ protected checkMultiplyDimensions(other: Matrix<T>): void;
180
+ /**
181
+ * Format matrix for display
182
+ */
183
+ toString(): string;
184
+ /**
185
+ * Check equality with another matrix
186
+ */
187
+ equals(other: Matrix<T>, tolerance?: number): boolean;
188
+ }
189
+ /**
190
+ * Type guard to check if a value is a Matrix
191
+ */
192
+ declare function isMatrix<T = number>(value: unknown): value is Matrix<T>;
193
+
194
+ /**
195
+ * Sparse Matrix Implementation (CSR Format)
196
+ *
197
+ * Compressed Sparse Row (CSR) format sparse matrix for efficient storage
198
+ * and operations on matrices with many zero elements.
199
+ *
200
+ * CSR stores:
201
+ * - values: Non-zero values in row-major order
202
+ * - colIndices: Column index for each non-zero value
203
+ * - rowPointers: Index into values/colIndices where each row starts
204
+ *
205
+ * @packageDocumentation
206
+ */
207
+
208
+ /**
209
+ * Sparse matrix using Compressed Sparse Row (CSR) format
210
+ *
211
+ * Efficient for:
212
+ * - Row slicing and iteration
213
+ * - Sparse matrix-vector multiplication
214
+ * - Matrices with many zeros (typically < 10% non-zero)
215
+ */
216
+ declare class SparseMatrix extends Matrix<number> {
217
+ readonly type: "SparseMatrix";
218
+ readonly rows: number;
219
+ readonly cols: number;
220
+ /**
221
+ * Non-zero values in row-major order
222
+ */
223
+ private readonly _data;
224
+ /**
225
+ * Column indices for each non-zero value
226
+ */
227
+ private readonly _colIndices;
228
+ /**
229
+ * Row pointers: rowPointers[i] = index of first non-zero in row i
230
+ * rowPointers[rows] = total number of non-zeros (nnz)
231
+ */
232
+ private readonly _rowPointers;
233
+ /**
234
+ * Create a sparse matrix from CSR components
235
+ *
236
+ * @param rows - Number of rows
237
+ * @param cols - Number of columns
238
+ * @param values - Non-zero values
239
+ * @param colIndices - Column index for each value
240
+ * @param rowPointers - Row start indices
241
+ */
242
+ constructor(rows: number, cols: number, values: Float64Array | number[], colIndices: Int32Array | number[], rowPointers: Int32Array | number[]);
243
+ /**
244
+ * Create a sparse matrix from a dense matrix
245
+ *
246
+ * @param dense - Dense matrix to convert
247
+ * @param dropTolerance - Values below this threshold are treated as zero
248
+ */
249
+ static fromDense(dense: DenseMatrix, dropTolerance?: number): SparseMatrix;
250
+ /**
251
+ * Create a sparse matrix from coordinate (COO) format
252
+ *
253
+ * @param rows - Number of rows
254
+ * @param cols - Number of columns
255
+ * @param entries - Array of {row, col, value} entries
256
+ */
257
+ static fromCOO(rows: number, cols: number, entries: Array<{
258
+ row: number;
259
+ col: number;
260
+ value: number;
261
+ }>): SparseMatrix;
262
+ /**
263
+ * Create a zero sparse matrix
264
+ */
265
+ static zeros(rows: number, cols: number): SparseMatrix;
266
+ /**
267
+ * Create a sparse identity matrix
268
+ */
269
+ static identity(n: number): SparseMatrix;
270
+ /**
271
+ * Create a sparse diagonal matrix
272
+ */
273
+ static diag(values: number[]): SparseMatrix;
274
+ /**
275
+ * Number of non-zero elements
276
+ */
277
+ get nnz(): number;
278
+ /**
279
+ * Sparsity: fraction of zero elements
280
+ */
281
+ get sparsity(): number;
282
+ /**
283
+ * Density: fraction of non-zero elements
284
+ */
285
+ get density(): number;
286
+ /**
287
+ * Get element at (row, col) - O(log(nnz_in_row)) for sorted columns
288
+ */
289
+ get(row: number, col: number): number;
290
+ /**
291
+ * Set element at (row, col) - returns new matrix (immutable)
292
+ */
293
+ set(row: number, col: number, value: number): SparseMatrix;
294
+ /**
295
+ * Get a row as a sparse 1×n matrix
296
+ */
297
+ row(index: number): SparseMatrix;
298
+ /**
299
+ * Get a column as a sparse m×1 matrix
300
+ */
301
+ column(index: number): SparseMatrix;
302
+ /**
303
+ * Get a submatrix
304
+ */
305
+ slice(spec: SliceSpec): SparseMatrix;
306
+ /**
307
+ * Get the diagonal elements
308
+ */
309
+ diagonal(k?: number): SparseMatrix;
310
+ /**
311
+ * Sparse matrix addition
312
+ */
313
+ add(other: Matrix<number>): SparseMatrix;
314
+ private addSparse;
315
+ /**
316
+ * Sparse matrix subtraction
317
+ */
318
+ subtract(other: Matrix<number>): SparseMatrix;
319
+ /**
320
+ * Element-wise multiplication (Hadamard product)
321
+ */
322
+ multiplyElementwise(other: Matrix<number>): SparseMatrix;
323
+ /**
324
+ * Sparse matrix multiplication
325
+ */
326
+ multiply(other: Matrix<number>): SparseMatrix;
327
+ private multiplySparse;
328
+ /**
329
+ * Scalar multiplication
330
+ */
331
+ scale(scalar: number): SparseMatrix;
332
+ /**
333
+ * Sparse matrix transpose
334
+ */
335
+ transpose(): SparseMatrix;
336
+ /**
337
+ * Negate all elements
338
+ */
339
+ negate(): SparseMatrix;
340
+ /**
341
+ * Sum of all elements
342
+ */
343
+ sum(): number;
344
+ /**
345
+ * Frobenius norm
346
+ */
347
+ norm(): number;
348
+ /**
349
+ * Trace (sum of diagonal elements)
350
+ */
351
+ trace(): number;
352
+ /**
353
+ * Convert to dense matrix
354
+ */
355
+ toDense(): DenseMatrix;
356
+ /**
357
+ * Convert to nested array
358
+ */
359
+ toArray(): number[][];
360
+ /**
361
+ * Convert to flat array (row-major)
362
+ */
363
+ toFlatArray(): number[];
364
+ /**
365
+ * Clone the matrix
366
+ */
367
+ clone(): SparseMatrix;
368
+ /**
369
+ * Get the CSR components
370
+ */
371
+ getCSR(): {
372
+ values: Float64Array;
373
+ colIndices: Int32Array;
374
+ rowPointers: Int32Array;
375
+ };
376
+ /**
377
+ * Iterate over non-zero entries
378
+ */
379
+ entries(): IterableIterator<MatrixEntry<number>>;
380
+ /**
381
+ * Iterate over non-zero values
382
+ */
383
+ values(): IterableIterator<number>;
384
+ /**
385
+ * Iterate over all values (including zeros, row-major)
386
+ */
387
+ allValues(): IterableIterator<number>;
388
+ /**
389
+ * Support `for...of` iteration (iterates over non-zero values)
390
+ */
391
+ [Symbol.iterator](): IterableIterator<number>;
392
+ /**
393
+ * Apply a function to each non-zero element
394
+ */
395
+ mapNonZeros(fn: (value: number, row: number, col: number) => number): SparseMatrix;
396
+ /**
397
+ * Apply a function to each element (including zeros)
398
+ * Warning: This may create a dense result
399
+ */
400
+ map(fn: (value: number, row: number, col: number) => number): SparseMatrix;
401
+ }
402
+ /**
403
+ * Type guard for SparseMatrix
404
+ */
405
+ declare function isSparseMatrix(value: unknown): value is SparseMatrix;
406
+
407
+ /**
408
+ * Dense Matrix Implementation
409
+ *
410
+ * Row-major dense matrix backed by Float64Array for efficient storage
411
+ * and operations. Supports views (slices without copying) where possible.
412
+ *
413
+ * @packageDocumentation
414
+ */
415
+
416
+ /**
417
+ * Dense matrix implementation using Float64Array
418
+ *
419
+ * Data is stored in row-major order for cache-friendly row access.
420
+ * All operations return new matrices (immutable API).
421
+ */
422
+ declare class DenseMatrix extends Matrix<number> {
423
+ readonly type: "DenseMatrix";
424
+ readonly rows: number;
425
+ readonly cols: number;
426
+ /**
427
+ * Internal data storage (row-major Float64Array)
428
+ */
429
+ private readonly data;
430
+ /**
431
+ * Whether this matrix is a view into another matrix's data
432
+ */
433
+ private readonly isView;
434
+ /**
435
+ * Offset into data array (for views)
436
+ */
437
+ private readonly offset;
438
+ /**
439
+ * Stride between rows (for views)
440
+ */
441
+ private readonly rowStride;
442
+ /**
443
+ * Create a new DenseMatrix
444
+ *
445
+ * @param rows - Number of rows
446
+ * @param cols - Number of columns
447
+ * @param data - Optional initial data (row-major order)
448
+ */
449
+ constructor(rows: number, cols: number, data?: Float64Array | number[] | number[][], viewConfig?: {
450
+ isView: boolean;
451
+ offset: number;
452
+ rowStride: number;
453
+ });
454
+ /**
455
+ * Create a matrix from a nested array
456
+ */
457
+ static fromArray(arr: number[][]): DenseMatrix;
458
+ /**
459
+ * Create a matrix from a flat array with specified dimensions
460
+ */
461
+ static fromFlat(rows: number, cols: number, data: number[]): DenseMatrix;
462
+ /**
463
+ * Create a zero matrix
464
+ */
465
+ static zeros(rows: number, cols: number): DenseMatrix;
466
+ /**
467
+ * Create a matrix filled with ones
468
+ */
469
+ static ones(rows: number, cols: number): DenseMatrix;
470
+ /**
471
+ * Create an identity matrix
472
+ */
473
+ static identity(n: number): DenseMatrix;
474
+ /**
475
+ * Create a diagonal matrix from values
476
+ */
477
+ static diag(values: number[]): DenseMatrix;
478
+ /**
479
+ * Create a matrix filled with a constant value
480
+ */
481
+ static fill(rows: number, cols: number, value: number): DenseMatrix;
482
+ /**
483
+ * Create a matrix with random values in [0, 1)
484
+ */
485
+ static random(rows: number, cols: number): DenseMatrix;
486
+ /**
487
+ * Get element at (row, col)
488
+ * O(1) access time
489
+ */
490
+ get(row: number, col: number): number;
491
+ /**
492
+ * Set element at (row, col) - returns new matrix
493
+ */
494
+ set(row: number, col: number, value: number): DenseMatrix;
495
+ /**
496
+ * Get the raw Float64Array data (copy if view, reference otherwise)
497
+ */
498
+ private toFlatFloat64Array;
499
+ /**
500
+ * Get a row as a 1×n matrix (view, no copy)
501
+ */
502
+ row(index: number): DenseMatrix;
503
+ /**
504
+ * Get a column as an m×1 matrix (copy, since data is row-major)
505
+ */
506
+ column(index: number): DenseMatrix;
507
+ /**
508
+ * Get a submatrix
509
+ */
510
+ slice(spec: SliceSpec): DenseMatrix;
511
+ /**
512
+ * Get the diagonal elements
513
+ */
514
+ diagonal(k?: number): DenseMatrix;
515
+ /**
516
+ * Matrix addition
517
+ */
518
+ add(other: Matrix<number>): DenseMatrix;
519
+ /**
520
+ * Matrix subtraction
521
+ */
522
+ subtract(other: Matrix<number>): DenseMatrix;
523
+ /**
524
+ * Element-wise multiplication (Hadamard product)
525
+ */
526
+ multiplyElementwise(other: Matrix<number>): DenseMatrix;
527
+ /**
528
+ * Matrix multiplication
529
+ */
530
+ multiply(other: Matrix<number>): DenseMatrix;
531
+ /**
532
+ * Scalar multiplication
533
+ */
534
+ scale(scalar: number): DenseMatrix;
535
+ /**
536
+ * Matrix transpose
537
+ */
538
+ transpose(): DenseMatrix;
539
+ /**
540
+ * Negate all elements
541
+ */
542
+ negate(): DenseMatrix;
543
+ /**
544
+ * Sum of all elements
545
+ */
546
+ sum(): number;
547
+ /**
548
+ * Mean of all elements
549
+ */
550
+ mean(): number;
551
+ /**
552
+ * Minimum element
553
+ */
554
+ min(): number;
555
+ /**
556
+ * Maximum element
557
+ */
558
+ max(): number;
559
+ /**
560
+ * Frobenius norm (sqrt of sum of squared elements)
561
+ */
562
+ norm(): number;
563
+ /**
564
+ * Trace (sum of diagonal elements)
565
+ */
566
+ trace(): number;
567
+ /**
568
+ * Convert to nested array
569
+ */
570
+ toArray(): number[][];
571
+ /**
572
+ * Convert to flat array (row-major)
573
+ */
574
+ toFlatArray(): number[];
575
+ /**
576
+ * Get the underlying Float64Array (copy)
577
+ */
578
+ toFloat64Array(): Float64Array;
579
+ /**
580
+ * Clone the matrix
581
+ */
582
+ clone(): DenseMatrix;
583
+ /**
584
+ * Convert to sparse matrix (CSR format)
585
+ *
586
+ * This is a synchronous conversion that creates a SparseMatrix
587
+ * containing only the non-zero elements of this matrix.
588
+ *
589
+ * @param dropTolerance - Values below this threshold are treated as zero
590
+ * @returns SparseMatrix representation
591
+ */
592
+ toSparse(dropTolerance?: number): SparseMatrix;
593
+ /**
594
+ * Iterate over entries with indices
595
+ */
596
+ entries(): IterableIterator<MatrixEntry<number>>;
597
+ /**
598
+ * Iterate over values (row-major)
599
+ */
600
+ values(): IterableIterator<number>;
601
+ /**
602
+ * Support `for...of` iteration (iterates over values)
603
+ */
604
+ [Symbol.iterator](): IterableIterator<number>;
605
+ /**
606
+ * Apply a function to each element
607
+ */
608
+ map(fn: (value: number, row: number, col: number) => number): DenseMatrix;
609
+ /**
610
+ * Apply a function to each element (no return, for side effects)
611
+ */
612
+ forEach(fn: (value: number, row: number, col: number) => void): void;
613
+ }
614
+ /**
615
+ * Type guard for DenseMatrix
616
+ */
617
+ declare function isDenseMatrix(value: unknown): value is DenseMatrix;
618
+
619
+ /**
620
+ * Matrix Backend Interface
621
+ *
622
+ * Defines the contract for matrix operation backends.
623
+ * Implementations include JSBackend (pure TypeScript),
624
+ * WASMBackend (AssemblyScript), and GPUBackend (WebGPU).
625
+ *
626
+ * @packageDocumentation
627
+ */
628
+
629
+ /**
630
+ * Backend type identifier
631
+ */
632
+ type BackendType = 'js' | 'wasm' | 'gpu' | 'parallel';
633
+ /**
634
+ * Backend selection hints
635
+ */
636
+ interface BackendHints {
637
+ /** Minimum size to use WASM backend */
638
+ wasmThreshold?: number;
639
+ /** Minimum size to use GPU backend */
640
+ gpuThreshold?: number;
641
+ /** Force a specific backend */
642
+ preferredBackend?: BackendType;
643
+ }
644
+ /**
645
+ * Default backend hints
646
+ */
647
+ declare const DEFAULT_BACKEND_HINTS: Required<BackendHints>;
648
+ /**
649
+ * Abstract backend interface for matrix operations
650
+ *
651
+ * Each backend implements the same operations but may use different
652
+ * underlying implementations (pure JS, WASM SIMD, WebGPU compute shaders).
653
+ */
654
+ interface MatrixBackend {
655
+ /**
656
+ * Backend identifier
657
+ */
658
+ readonly type: BackendType;
659
+ /**
660
+ * Whether the backend is available in the current environment
661
+ */
662
+ isAvailable(): boolean;
663
+ /**
664
+ * Initialize the backend (may be async for WASM/GPU)
665
+ */
666
+ initialize(): Promise<void>;
667
+ /**
668
+ * Matrix addition: C = A + B
669
+ */
670
+ add(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
671
+ /**
672
+ * Matrix subtraction: C = A - B
673
+ */
674
+ subtract(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
675
+ /**
676
+ * Element-wise multiplication: C = A .* B
677
+ */
678
+ multiplyElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
679
+ /**
680
+ * Element-wise division: C = A ./ B
681
+ */
682
+ divideElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
683
+ /**
684
+ * Scalar multiplication: C = A * s
685
+ */
686
+ scale(a: DenseMatrix, scalar: number): DenseMatrix;
687
+ /**
688
+ * Element-wise absolute value
689
+ */
690
+ abs(a: DenseMatrix): DenseMatrix;
691
+ /**
692
+ * Element-wise negation
693
+ */
694
+ negate(a: DenseMatrix): DenseMatrix;
695
+ /**
696
+ * Matrix multiplication: C = A * B
697
+ */
698
+ multiply(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
699
+ /**
700
+ * Matrix transpose
701
+ */
702
+ transpose(a: DenseMatrix): DenseMatrix;
703
+ /**
704
+ * Sum of all elements (may be async for parallel backends)
705
+ */
706
+ sum(a: DenseMatrix): number | Promise<number>;
707
+ /**
708
+ * Sum along an axis (0 = columns, 1 = rows)
709
+ */
710
+ sumAxis(a: DenseMatrix, axis: 0 | 1): DenseMatrix;
711
+ /**
712
+ * Frobenius norm
713
+ */
714
+ norm(a: DenseMatrix): number;
715
+ /**
716
+ * Dot product of two vectors (may be async for parallel backends)
717
+ */
718
+ dot(a: DenseMatrix, b: DenseMatrix): number | Promise<number>;
719
+ }
720
+ /**
721
+ * Registry of available backends
722
+ */
723
+ declare class BackendRegistry {
724
+ private backends;
725
+ private initialized;
726
+ private hints;
727
+ /**
728
+ * Register a backend
729
+ */
730
+ register(backend: MatrixBackend): void;
731
+ /**
732
+ * Get a backend by type
733
+ */
734
+ get(type: BackendType): MatrixBackend | undefined;
735
+ /**
736
+ * Check if a backend is registered and available
737
+ */
738
+ has(type: BackendType): boolean;
739
+ /**
740
+ * Initialize a backend
741
+ */
742
+ initialize(type: BackendType): Promise<void>;
743
+ /**
744
+ * Get all available backends
745
+ */
746
+ available(): BackendType[];
747
+ /**
748
+ * Update selection hints
749
+ */
750
+ setHints(hints: BackendHints): void;
751
+ /**
752
+ * Get current hints
753
+ */
754
+ getHints(): Required<BackendHints>;
755
+ /**
756
+ * Select the best backend for a given operation size
757
+ */
758
+ selectBackend(elementCount: number): MatrixBackend;
759
+ }
760
+ /**
761
+ * Global backend registry
762
+ */
763
+ declare const backendRegistry: BackendRegistry;
764
+
765
+ /**
766
+ * Pure TypeScript Matrix Backend
767
+ *
768
+ * Reference implementation of matrix operations using plain JavaScript.
769
+ * Serves as baseline for correctness and fallback when WASM/GPU unavailable.
770
+ *
771
+ * @packageDocumentation
772
+ */
773
+
774
+ /**
775
+ * Pure JavaScript matrix backend
776
+ *
777
+ * All operations are implemented in TypeScript without external dependencies.
778
+ * Performance is optimized where possible but prioritizes correctness.
779
+ */
780
+ declare class JSBackend implements MatrixBackend {
781
+ readonly type: BackendType;
782
+ /**
783
+ * JS backend is always available
784
+ */
785
+ isAvailable(): boolean;
786
+ /**
787
+ * No initialization needed for JS backend
788
+ */
789
+ initialize(): Promise<void>;
790
+ /**
791
+ * Matrix addition
792
+ */
793
+ add(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
794
+ /**
795
+ * Matrix subtraction
796
+ */
797
+ subtract(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
798
+ /**
799
+ * Element-wise multiplication
800
+ */
801
+ multiplyElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
802
+ /**
803
+ * Element-wise division
804
+ */
805
+ divideElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
806
+ /**
807
+ * Scalar multiplication
808
+ */
809
+ scale(a: DenseMatrix, scalar: number): DenseMatrix;
810
+ /**
811
+ * Element-wise absolute value
812
+ */
813
+ abs(a: DenseMatrix): DenseMatrix;
814
+ /**
815
+ * Element-wise negation
816
+ */
817
+ negate(a: DenseMatrix): DenseMatrix;
818
+ /**
819
+ * Matrix multiplication using the naive O(n³) algorithm
820
+ *
821
+ * For larger matrices, consider using blocked/tiled multiplication
822
+ * or Strassen's algorithm (implemented in WASM backend).
823
+ */
824
+ multiply(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
825
+ /**
826
+ * Matrix transpose
827
+ */
828
+ transpose(a: DenseMatrix): DenseMatrix;
829
+ /**
830
+ * Sum of all elements
831
+ */
832
+ sum(a: DenseMatrix): number;
833
+ /**
834
+ * Sum along an axis
835
+ * axis=0: sum columns (result is 1×n row vector)
836
+ * axis=1: sum rows (result is m×1 column vector)
837
+ */
838
+ sumAxis(a: DenseMatrix, axis: 0 | 1): DenseMatrix;
839
+ /**
840
+ * Frobenius norm: sqrt(sum of squared elements)
841
+ */
842
+ norm(a: DenseMatrix): number;
843
+ /**
844
+ * Dot product of two vectors
845
+ *
846
+ * Works with row vectors (1×n) or column vectors (m×1).
847
+ */
848
+ dot(a: DenseMatrix, b: DenseMatrix): number;
849
+ private checkDimensionsMatch;
850
+ private checkMultiplyDimensions;
851
+ }
852
+ /**
853
+ * Default JS backend instance
854
+ */
855
+ declare const jsBackend: JSBackend;
856
+
857
+ /**
858
+ * Configuration for ParallelBackend
859
+ */
860
+ interface ParallelBackendConfig {
861
+ /** Custom ComputePool instance (optional) */
862
+ pool?: ComputePool;
863
+ /** Pool configuration (if creating new pool) */
864
+ poolConfig?: Partial<ComputePoolConfig>;
865
+ /** Threshold for parallel execution (elements) */
866
+ parallelThreshold?: number;
867
+ }
868
+ /**
869
+ * Parallel matrix backend using worker pool
870
+ *
871
+ * Provides parallel implementations of matrix operations for large matrices.
872
+ * Uses automatic chunking and result aggregation for efficient parallel execution.
873
+ *
874
+ * @example
875
+ * ```typescript
876
+ * const backend = new ParallelBackend();
877
+ * await backend.initialize();
878
+ *
879
+ * const result = await backend.multiply(largeMatrixA, largeMatrixB);
880
+ * ```
881
+ */
882
+ declare class ParallelBackend {
883
+ readonly type: BackendType;
884
+ private pool;
885
+ private initialized;
886
+ private parallelThreshold;
887
+ constructor(config?: ParallelBackendConfig);
888
+ /**
889
+ * Check if parallel backend is available
890
+ */
891
+ isAvailable(): boolean;
892
+ /**
893
+ * Initialize the worker pool
894
+ */
895
+ initialize(): Promise<void>;
896
+ /**
897
+ * Check if initialized
898
+ */
899
+ isReady(): boolean;
900
+ /**
901
+ * Determine if operation should be parallelized
902
+ */
903
+ private shouldParallelize;
904
+ /**
905
+ * Parallel matrix addition
906
+ */
907
+ add(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
908
+ /**
909
+ * Parallel matrix subtraction
910
+ */
911
+ subtract(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
912
+ /**
913
+ * Parallel element-wise multiplication
914
+ */
915
+ multiplyElementwise(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
916
+ /**
917
+ * Parallel element-wise division
918
+ */
919
+ divideElementwise(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
920
+ /**
921
+ * Parallel scalar multiplication
922
+ */
923
+ scale(a: DenseMatrix, scalar: number): Promise<DenseMatrix>;
924
+ /**
925
+ * Element-wise absolute value (sequential, as it's simple)
926
+ */
927
+ abs(a: DenseMatrix): DenseMatrix;
928
+ /**
929
+ * Element-wise negation
930
+ */
931
+ negate(a: DenseMatrix): DenseMatrix;
932
+ /**
933
+ * Parallel matrix multiplication
934
+ */
935
+ multiply(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
936
+ /**
937
+ * Parallel matrix transpose
938
+ */
939
+ transpose(a: DenseMatrix): Promise<DenseMatrix>;
940
+ /**
941
+ * Parallel sum of all elements
942
+ */
943
+ sum(a: DenseMatrix): Promise<number>;
944
+ /**
945
+ * Sum along an axis (sequential)
946
+ */
947
+ sumAxis(a: DenseMatrix, axis: 0 | 1): DenseMatrix;
948
+ /**
949
+ * Frobenius norm (sequential)
950
+ */
951
+ norm(a: DenseMatrix): number;
952
+ /**
953
+ * Parallel dot product
954
+ */
955
+ dot(a: DenseMatrix, b: DenseMatrix): Promise<number>;
956
+ /**
957
+ * Terminate the worker pool
958
+ */
959
+ terminate(): Promise<void>;
960
+ /**
961
+ * Get pool statistics
962
+ */
963
+ getStats(): _danielsimonjr_mathts_parallel.PoolStats;
964
+ private checkDimensionsMatch;
965
+ private checkMultiplyDimensions;
966
+ }
967
+ /**
968
+ * Default parallel backend instance using global compute pool
969
+ */
970
+ declare const parallelBackend: ParallelBackend;
971
+ /**
972
+ * Create a new parallel backend with custom configuration
973
+ */
974
+ declare function createParallelBackend(config?: ParallelBackendConfig): ParallelBackend;
975
+
976
+ /**
977
+ * WASM Feature Detection
978
+ *
979
+ * Detects available WebAssembly features in the current environment.
980
+ * Used to enable/disable SIMD, threads, and other optimizations.
981
+ *
982
+ * @packageDocumentation
983
+ */
984
+ /**
985
+ * Available WASM features
986
+ */
987
+ interface WasmFeatures {
988
+ /** Basic WebAssembly support */
989
+ webAssembly: boolean;
990
+ /** WASM SIMD (v128) support */
991
+ simd: boolean;
992
+ /** SharedArrayBuffer available */
993
+ sharedMemory: boolean;
994
+ /** Atomics API available */
995
+ atomics: boolean;
996
+ /** WASM threads (requires SharedArrayBuffer) */
997
+ threads: boolean;
998
+ /** Bulk memory operations */
999
+ bulkMemory: boolean;
1000
+ /** Reference types */
1001
+ referenceTypes: boolean;
1002
+ /** WASM exception handling */
1003
+ exceptions: boolean;
1004
+ /** Tail call optimization */
1005
+ tailCall: boolean;
1006
+ }
1007
+ /**
1008
+ * Detect all WASM features
1009
+ *
1010
+ * @returns Promise resolving to detected features
1011
+ */
1012
+ declare function detectWasmFeatures(): Promise<WasmFeatures>;
1013
+ /**
1014
+ * Synchronously check if WASM is available (basic check only)
1015
+ */
1016
+ declare function isWasmAvailable(): boolean;
1017
+ /**
1018
+ * Synchronously check if SharedArrayBuffer is available
1019
+ */
1020
+ declare function isSharedMemoryAvailable(): boolean;
1021
+ /**
1022
+ * Synchronously check if Atomics are available
1023
+ */
1024
+ declare function isAtomicsAvailable(): boolean;
1025
+ /**
1026
+ * Clear the feature detection cache (useful for testing)
1027
+ */
1028
+ declare function clearFeatureCache(): void;
1029
+ /**
1030
+ * Get cached features if available
1031
+ */
1032
+ declare function getCachedFeatures(): WasmFeatures | null;
1033
+
1034
+ /**
1035
+ * WASM Matrix Backend
1036
+ *
1037
+ * Implements MatrixBackend using WebAssembly for accelerated operations.
1038
+ * Falls back to JSBackend when WASM is unavailable or for small matrices.
1039
+ *
1040
+ * @packageDocumentation
1041
+ */
1042
+
1043
+ /**
1044
+ * WASM Backend configuration
1045
+ */
1046
/**
 * Configuration for the WebAssembly matrix backend.
 */
interface WASMBackendConfig {
    /** Minimum elements to use WASM (default: 100) */
    minElements?: number;
    /** Path to WASM file */
    wasmPath?: string;
    /** Enable SIMD optimizations when available */
    useSIMD?: boolean;
}
/**
 * WASM Backend for matrix operations
 *
 * Uses WebAssembly for accelerated matrix operations with SIMD support.
 * Automatically falls back to JavaScript for small matrices or when WASM unavailable.
 */
declare class WASMBackend implements MatrixBackend {
    readonly type: BackendType;
    private config;
    private wasmModule;
    private features;
    private initPromise;
    constructor(config?: WASMBackendConfig);
    /**
     * Check if WASM is available in the current environment
     */
    isAvailable(): boolean;
    /**
     * Initialize the WASM backend
     */
    initialize(): Promise<void>;
    private doInitialize;
    /**
     * Check if operation should use WASM
     */
    private shouldUseWasm;
    /**
     * Get detected WASM features
     */
    getFeatures(): WasmFeatures | null;
    /* Synchronous MatrixBackend operations (contracts defined on MatrixBackend). */
    add(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    subtract(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    multiplyElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    divideElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    scale(a: DenseMatrix, scalar: number): DenseMatrix;
    abs(a: DenseMatrix): DenseMatrix;
    negate(a: DenseMatrix): DenseMatrix;
    multiply(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    transpose(a: DenseMatrix): DenseMatrix;
    sum(a: DenseMatrix): number;
    sumAxis(a: DenseMatrix, axis: 0 | 1): DenseMatrix;
    norm(a: DenseMatrix): number;
    dot(a: DenseMatrix, b: DenseMatrix): number;
    /**
     * LU Decomposition using WASM
     */
    luDecomposition(a: DenseMatrix): Promise<{
        lu: DenseMatrix;
        perm: Int32Array;
        singular: boolean;
    }>;
    private luDecompositionJS;
    /**
     * QR Decomposition using WASM
     */
    qrDecomposition(a: DenseMatrix): Promise<{
        q: DenseMatrix;
        r: DenseMatrix;
    }>;
    private qrDecompositionJS;
    /**
     * Matrix inversion using LU decomposition
     */
    inverse(a: DenseMatrix): Promise<{
        inverse: DenseMatrix;
        singular: boolean;
    }>;
    private inverseJS;
    /**
     * Compute matrix determinant using LU decomposition
     */
    determinantWasm(a: DenseMatrix): Promise<number>;
    private determinantJS;
    /**
     * Cholesky Decomposition using WASM
     * For symmetric positive-definite matrices: A = L * L^T
     */
    choleskyDecomposition(a: DenseMatrix): Promise<{
        l: DenseMatrix;
        positiveDefinite: boolean;
    }>;
    private choleskyDecompositionJS;
    /**
     * Update configuration
     */
    updateConfig(config: Partial<WASMBackendConfig>): void;
    /**
     * Get current configuration
     */
    getConfig(): Required<WASMBackendConfig>;
}
/**
 * Global WASM backend instance
 */
declare const wasmBackend: WASMBackend;
/**
 * Create a WASM backend with custom configuration
 */
declare function createWASMBackend(config?: WASMBackendConfig): WASMBackend;
1153
+
1154
/**
 * WebGPU Detection and Capability Checking
 *
 * Provides runtime detection of WebGPU support and adapter capabilities.
 */
/**
 * WebGPU adapter information
 */
interface GPUAdapterInfo {
    /** Adapter vendor */
    vendor: string;
    /** Adapter architecture */
    architecture: string;
    /** Device description */
    device: string;
    /** Driver description */
    description: string;
}
/**
 * WebGPU capability information
 */
interface GPUCapabilities {
    /** Whether WebGPU is supported */
    supported: boolean;
    /** Adapter information if available */
    adapterInfo: GPUAdapterInfo | null;
    /** Maximum buffer size in bytes */
    maxBufferSize: number;
    /** Maximum compute workgroup size */
    maxWorkgroupSize: [number, number, number];
    /** Maximum storage buffer binding size */
    maxStorageBufferBindingSize: number;
    /** Maximum compute invocations per workgroup */
    maxComputeInvocationsPerWorkgroup: number;
    /** Maximum workgroups per dimension */
    maxComputeWorkgroupsPerDimension: number;
    /** Whether the adapter is a fallback/software adapter */
    isFallbackAdapter: boolean;
    /** Supported features */
    features: string[];
}
/**
 * Check if WebGPU is available in the current environment
 */
declare function hasWebGPU(): boolean;
/**
 * Detect WebGPU capabilities
 * @param preferHighPerformance - Whether to prefer high-performance GPU
 */
declare function detectGPUCapabilities(preferHighPerformance?: boolean): Promise<GPUCapabilities>;
/**
 * Recommended workgroup size based on GPU capabilities
 */
declare function getRecommendedWorkgroupSize(capabilities: GPUCapabilities): [number, number, number];
1208
+
1209
/**
 * WebGPU Context Management
 *
 * Manages WebGPU device, queue, and command encoding.
 */

/**
 * Options for GPUContext initialization
 */
interface GPUContextOptions {
    /** Prefer high-performance GPU */
    preferHighPerformance?: boolean;
    /** Required features for the device */
    requiredFeatures?: GPUFeatureName[];
    /** Required limits for the device */
    requiredLimits?: Record<string, number>;
    /** Label for debugging */
    label?: string;
}
/**
 * Status of the GPU context
 */
type GPUContextStatus = 'uninitialized' | 'initializing' | 'ready' | 'error' | 'lost';
/**
 * Event emitted when device is lost
 */
interface DeviceLostEvent {
    /** Reason reported by the WebGPU device-lost promise */
    reason: GPUDeviceLostReason;
    /** Human-readable description of the loss */
    message: string;
}
/**
 * GPU Context manages the lifecycle of WebGPU resources
 */
declare class GPUContext {
    private adapter;
    private device;
    private _status;
    private _capabilities;
    private _lastError;
    private deviceLostCallbacks;
    private label;
    constructor(options?: GPUContextOptions);
    /**
     * Get the current status
     */
    get status(): GPUContextStatus;
    /**
     * Check if context is ready
     */
    get isReady(): boolean;
    /**
     * Get the GPU device (throws if not initialized)
     */
    getDevice(): GPUDevice;
    /**
     * Get the GPU queue
     */
    getQueue(): GPUQueue;
    /**
     * Get capabilities
     */
    get capabilities(): GPUCapabilities | null;
    /**
     * Get last error
     */
    get lastError(): Error | null;
    /**
     * Initialize the GPU context
     */
    initialize(options?: GPUContextOptions): Promise<boolean>;
    /**
     * Register callback for device lost event
     */
    onDeviceLost(callback: (event: DeviceLostEvent) => void): void;
    /**
     * Create a command encoder
     */
    createCommandEncoder(label?: string): GPUCommandEncoder;
    /**
     * Create a buffer
     */
    createBuffer(size: number, usage: GPUBufferUsageFlags, label?: string, mappedAtCreation?: boolean): GPUBuffer;
    /**
     * Create a storage buffer for compute operations
     */
    createStorageBuffer(size: number, label?: string, readable?: boolean, writable?: boolean): GPUBuffer;
    /**
     * Create a staging buffer for reading back data
     */
    createStagingBuffer(size: number, label?: string): GPUBuffer;
    /**
     * Create a compute pipeline
     */
    createComputePipeline(shaderModule: GPUShaderModule, entryPoint: string, layout?: GPUPipelineLayout | 'auto', label?: string): GPUComputePipeline;
    /**
     * Create a shader module from WGSL source
     */
    createShaderModule(code: string, label?: string): GPUShaderModule;
    /**
     * Create a bind group
     */
    createBindGroup(layout: GPUBindGroupLayout, entries: GPUBindGroupEntry[], label?: string): GPUBindGroup;
    /**
     * Submit commands to the GPU queue
     */
    submitCommands(commandBuffers: GPUCommandBuffer[]): void;
    /**
     * Write data to a buffer
     */
    writeBuffer(buffer: GPUBuffer, data: ArrayBufferView | ArrayBuffer | SharedArrayBuffer, bufferOffset?: number, dataOffset?: number, size?: number): void;
    /**
     * Read data from a buffer (async)
     */
    readBuffer(buffer: GPUBuffer, offset?: number, size?: number): Promise<ArrayBuffer>;
    /**
     * Dispatch a compute shader
     */
    dispatchCompute(pipeline: GPUComputePipeline, bindGroups: GPUBindGroup[], workgroupCounts: [number, number, number]): void;
    /**
     * Wait for all GPU operations to complete
     */
    waitForCompletion(): Promise<void>;
    /**
     * Destroy the context and release resources
     */
    destroy(): void;
}
/**
 * Get the global GPU context
 */
declare function getGlobalGPUContext(): GPUContext;
/**
 * Destroy the global GPU context
 */
declare function destroyGlobalGPU(): void;
1344
+
1345
/**
 * GPU Buffer Pool
 *
 * Manages GPU buffer allocation, deallocation, and reuse.
 * Reduces allocation overhead by recycling buffers.
 */

/**
 * Options for buffer pool
 */
interface BufferPoolOptions {
    /** Maximum total memory to cache (bytes) */
    maxCacheSize?: number;
    /** Time after which unused buffers are evicted (ms) */
    evictionTimeout?: number;
    /** Whether to enable automatic eviction */
    autoEvict?: boolean;
    /** Interval for automatic eviction (ms) */
    evictionInterval?: number;
}
/**
 * GPU Buffer Pool for efficient buffer management
 */
declare class BufferPool {
    private context;
    private buffers;
    private maxCacheSize;
    private evictionTimeout;
    private evictionTimer;
    private currentCacheSize;
    constructor(context: GPUContext, options?: BufferPoolOptions);
    /**
     * Generate a key for buffer categorization
     */
    private getBufferKey;
    /**
     * Round up to nearest power of 2
     */
    private roundUpToPowerOf2;
    /**
     * Acquire a buffer from the pool or create a new one
     */
    acquire(size: number, usage: GPUBufferUsageFlags, label?: string): GPUBuffer;
    /**
     * Release a buffer back to the pool
     */
    release(buffer: GPUBuffer): void;
    /**
     * Create a storage buffer from the pool
     */
    acquireStorageBuffer(size: number, label?: string, readable?: boolean, writable?: boolean): GPUBuffer;
    /**
     * Create a staging buffer from the pool
     */
    acquireStagingBuffer(size: number, label?: string): GPUBuffer;
    /**
     * Create a uniform buffer from the pool
     */
    acquireUniformBuffer(size: number, label?: string): GPUBuffer;
    /**
     * Evict old unused buffers
     */
    evictOldBuffers(): void;
    /**
     * Force eviction to reduce cache to target size
     */
    evictToSize(targetSize: number): void;
    /**
     * Start automatic eviction timer
     */
    startAutoEviction(interval: number): void;
    /**
     * Stop automatic eviction timer
     */
    stopAutoEviction(): void;
    /**
     * Get pool statistics
     */
    getStats(): {
        totalBuffers: number;
        inUseBuffers: number;
        cachedBuffers: number;
        currentCacheSize: number;
        maxCacheSize: number;
    };
    /**
     * Clear all buffers and reset pool
     */
    clear(): void;
    /**
     * Destroy the pool
     */
    destroy(): void;
}
1439
+
1440
/**
 * GPU Shader Manager
 *
 * Manages WGSL shader loading, compilation, and caching.
 */

/**
 * Built-in shader library (WGSL source strings)
 */
declare const BUILTIN_SHADERS: {
    /** Matrix addition shader */
    matrixAdd: string;
    /** Matrix subtraction shader */
    matrixSub: string;
    /** Element-wise multiplication shader */
    matrixMul: string;
    /** Scalar multiplication shader */
    scalarMul: string;
    /** Matrix multiplication (naive) shader */
    matmul: string;
    /** Matrix transpose shader */
    transpose: string;
    /** Sum reduction shader (first pass) */
    sumReduce: string;
};
/**
 * Shader Manager for compiling and caching GPU shaders
 */
declare class ShaderManager {
    private context;
    private cache;
    constructor(context: GPUContext);
    /**
     * Get or compile a shader module
     */
    getShaderModule(name: string, code: string): GPUShaderModule;
    /**
     * Get a builtin shader module
     */
    getBuiltinShader(name: keyof typeof BUILTIN_SHADERS): GPUShaderModule;
    /**
     * Get or create a compute pipeline
     */
    getPipeline(shaderName: string, entryPoint: string, code?: string, layout?: GPUPipelineLayout | 'auto'): GPUComputePipeline;
    /**
     * Get a builtin compute pipeline
     */
    getBuiltinPipeline(name: keyof typeof BUILTIN_SHADERS, entryPoint?: string): GPUComputePipeline;
    /**
     * Precompile all builtin shaders
     */
    precompileBuiltins(): void;
    /**
     * Clear shader cache
     */
    clearCache(): void;
    /**
     * Get cache statistics
     */
    getStats(): {
        cachedShaders: number;
        cachedPipelines: number;
    };
}
1504
+
1505
/**
 * GPU Batch Executor
 *
 * Manages batched GPU command submission for reduced overhead.
 * Queues multiple operations and executes them together for efficiency.
 *
 * @packageDocumentation
 */

/**
 * Result of batch execution
 */
interface BatchResult {
    /** Success status */
    success: boolean;
    /** Number of operations executed */
    operationCount: number;
    /** Execution time in milliseconds */
    duration: number;
    /** Error message if failed */
    error?: string;
}
/**
 * Options for batch execution
 */
interface BatchOptions {
    /** Maximum operations per batch before auto-flush */
    maxBatchSize?: number;
    /** Auto-flush when batch is full */
    autoFlush?: boolean;
    /** Wait for GPU completion before returning */
    waitForCompletion?: boolean;
}
/**
 * GPU Batch Executor
 *
 * Accumulates GPU operations and executes them in batches to reduce
 * command submission overhead. Uses command buffers efficiently.
 *
 * @example
 * ```typescript
 * const executor = new BatchExecutor(context, shaders, bufferPool);
 *
 * executor.add(a, b, output, { rows: 100, cols: 100 });
 * executor.matmul(a, b, output, { rows: 100, cols: 100, k: 100 });
 *
 * await executor.flush();
 * ```
 */
declare class BatchExecutor {
    private context;
    private shaders;
    private bufferPool;
    private operations;
    private options;
    /**
     * Create a new batch executor
     */
    constructor(context: GPUContext, shaders: ShaderManager, bufferPool: BufferPool, options?: BatchOptions);
    /**
     * Get current batch size
     */
    get size(): number;
    /**
     * Check if batch is empty
     */
    get isEmpty(): boolean;
    /**
     * Check if batch is full
     */
    get isFull(): boolean;
    /**
     * Queue an add operation
     */
    add(inputA: GPUBuffer, inputB: GPUBuffer, output: GPUBuffer, dimensions: {
        rows: number;
        cols: number;
    }): void;
    /**
     * Queue a subtract operation
     */
    subtract(inputA: GPUBuffer, inputB: GPUBuffer, output: GPUBuffer, dimensions: {
        rows: number;
        cols: number;
    }): void;
    /**
     * Queue an element-wise multiply operation
     */
    multiply(inputA: GPUBuffer, inputB: GPUBuffer, output: GPUBuffer, dimensions: {
        rows: number;
        cols: number;
    }): void;
    /**
     * Queue a scale operation
     */
    scale(input: GPUBuffer, output: GPUBuffer, scalar: number, dimensions: {
        rows: number;
        cols: number;
    }): void;
    /**
     * Queue a matrix multiplication operation
     */
    matmul(inputA: GPUBuffer, inputB: GPUBuffer, output: GPUBuffer, dimensions: {
        rows: number;
        cols: number;
        k: number;
    }): void;
    /**
     * Queue a transpose operation
     */
    transpose(input: GPUBuffer, output: GPUBuffer, dimensions: {
        rows: number;
        cols: number;
    }): void;
    /**
     * Queue a sum reduction operation
     */
    reduceSum(input: GPUBuffer, output: GPUBuffer, dimensions: {
        rows: number;
        cols: number;
    }): void;
    /**
     * Queue operation (internal)
     */
    private queueOperation;
    /**
     * Flush all queued operations (async)
     */
    flush(): Promise<BatchResult>;
    /**
     * Flush synchronously (fire and forget)
     */
    flushSync(): void;
    /**
     * Encode a single operation into the command encoder
     */
    private encodeOperation;
    /**
     * Get pipeline name for operation type
     */
    private getPipelineName;
    /**
     * Create params buffer for operation
     */
    private createParamsBuffer;
    /**
     * Calculate workgroup dispatch counts
     */
    private calculateWorkgroups;
    /**
     * Clear all queued operations without executing
     */
    clear(): void;
    /**
     * Get statistics about the batch executor
     */
    getStats(): {
        queuedOperations: number;
        maxBatchSize: number;
        autoFlush: boolean;
    };
}
1667
+
1668
/**
 * GPU-CPU Synchronization Strategy
 *
 * Implements efficient patterns for synchronizing data between
 * CPU and GPU memory. Minimizes transfer latency and enables
 * overlapping of CPU and GPU work.
 *
 * @packageDocumentation
 */

/**
 * Synchronization strategy type
 */
type SyncStrategy = 'immediate' | 'lazy' | 'double-buffer' | 'streaming';
/**
 * Transfer direction
 */
type TransferDirection = 'cpu-to-gpu' | 'gpu-to-cpu';
/**
 * Transfer result
 */
interface TransferResult {
    /** Request ID */
    id: number;
    /** Success status */
    success: boolean;
    /** Transfer time in milliseconds */
    duration: number;
    /** Bytes transferred */
    bytesTransferred: number;
    /** Error message if failed */
    error?: string;
}
/**
 * Sync configuration
 */
interface SyncConfig {
    /** Default strategy */
    strategy: SyncStrategy;
    /** Chunk size for streaming (bytes) */
    chunkSize?: number;
    /** Maximum pending transfers */
    maxPendingTransfers?: number;
    /** Auto-coalesce nearby transfers */
    coalesceTransfers?: boolean;
}
/**
 * GPU-CPU Synchronization Manager
 *
 * Manages data transfer between CPU and GPU with various strategies
 * optimized for different use cases.
 *
 * @example
 * ```typescript
 * const sync = new SyncManager(context, bufferPool, {
 *   strategy: 'double-buffer',
 * });
 *
 * // Upload data to GPU
 * await sync.upload(cpuData, gpuBuffer);
 *
 * // Download results from GPU
 * const result = await sync.download(gpuBuffer);
 * ```
 */
declare class SyncManager {
    private context;
    private config;
    private pendingTransfers;
    private nextRequestId;
    private stagingBuffers;
    private totalUploads;
    private totalDownloads;
    private totalBytesUploaded;
    private totalBytesDownloaded;
    constructor(context: GPUContext, _bufferPool: BufferPool, // Reserved for future pool integration
    config?: Partial<SyncConfig>);
    /**
     * Upload data from CPU to GPU
     */
    upload(cpuData: Float32Array | Float64Array | Uint32Array | Int32Array, gpuBuffer: GPUBuffer, options?: {
        offset?: number;
        size?: number;
    }): Promise<TransferResult>;
    /**
     * Download data from GPU to CPU
     */
    download(gpuBuffer: GPUBuffer, options?: {
        offset?: number;
        size?: number;
    }): Promise<Float32Array>;
    /**
     * Download data using double-buffering for overlap
     */
    downloadDoubleBuffered(gpuBuffer: GPUBuffer, size: number): Promise<Float32Array>;
    /**
     * Stream large data in chunks
     */
    uploadStreaming(cpuData: Float32Array, gpuBuffer: GPUBuffer, onProgress?: (progress: number) => void): Promise<TransferResult>;
    /**
     * Download large data in chunks
     */
    downloadStreaming(gpuBuffer: GPUBuffer, totalSize: number, onProgress?: (progress: number) => void): Promise<Float32Array>;
    /**
     * Batch multiple transfers
     */
    batchTransfer(requests: Array<{
        data: Float32Array;
        buffer: GPUBuffer;
        direction: TransferDirection;
    }>): Promise<TransferResult[]>;
    /**
     * Create or reuse a staging buffer
     */
    private getOrCreateStagingBuffer;
    /**
     * Round up to next power of 2
     */
    private roundToPowerOf2;
    /**
     * Wait for all pending transfers to complete
     */
    flush(): Promise<void>;
    /**
     * Get synchronization statistics
     */
    getStats(): {
        totalUploads: number;
        totalDownloads: number;
        totalBytesUploaded: number;
        totalBytesDownloaded: number;
        pendingTransfers: number;
        stagingBuffersCount: number;
        strategy: SyncStrategy;
    };
    /**
     * Destroy sync manager and release resources
     */
    destroy(): void;
}
/**
 * Create a sync manager with the recommended configuration
 */
declare function createSyncManager(context: GPUContext, bufferPool: BufferPool, strategy?: SyncStrategy): SyncManager;
1812
+
1813
/**
 * GPU Backend for Matrix Operations
 *
 * WebGPU-accelerated matrix operations for large matrices.
 */

/**
 * GPU Backend status
 */
type GPUBackendStatus = 'uninitialized' | 'initializing' | 'ready' | 'error' | 'unsupported';
/**
 * Options for GPU backend
 */
interface GPUBackendOptions extends GPUContextOptions {
    /** Use global GPU context instead of creating a new one */
    useGlobalContext?: boolean;
    /** Buffer pool options */
    bufferPoolOptions?: {
        maxCacheSize?: number;
        evictionTimeout?: number;
    };
    /** Threshold for using GPU (matrix size) */
    threshold?: number;
}
/**
 * GPU Backend for accelerated matrix operations
 */
declare class GPUBackend {
    private context;
    private bufferPool;
    private shaderManager;
    private _status;
    private _capabilities;
    private _lastError;
    private threshold;
    private workgroupSize;
    private useGlobalContext;
    constructor(options?: GPUBackendOptions);
    /**
     * Get the current status
     */
    get status(): GPUBackendStatus;
    /**
     * Check if backend is ready
     */
    get isReady(): boolean;
    /**
     * Get capabilities
     */
    get capabilities(): GPUCapabilities | null;
    /**
     * Get last error
     */
    get lastError(): Error | null;
    /**
     * Initialize the GPU backend
     */
    initialize(options?: GPUBackendOptions): Promise<boolean>;
    /**
     * Check if GPU should be used for the given matrix size
     */
    shouldUseGPU(rows: number, cols: number): boolean;
    /**
     * Calculate workgroup counts for a matrix
     */
    calculateWorkgroups(rows: number, cols: number): [number, number, number];
    /**
     * Get the GPU context
     */
    getContext(): GPUContext;
    /**
     * Get the buffer pool
     */
    getBufferPool(): BufferPool;
    /**
     * Get the shader manager
     */
    getShaderManager(): ShaderManager;
    /**
     * Add two matrices element-wise
     */
    add(a: Float32Array, b: Float32Array, rows: number, cols: number): Promise<Float32Array>;
    /**
     * Multiply two matrices
     */
    matmul(a: Float32Array, b: Float32Array, M: number, K: number, N: number): Promise<Float32Array>;
    /**
     * Transpose a matrix
     */
    transpose(a: Float32Array, rows: number, cols: number): Promise<Float32Array>;
    /**
     * Scale a matrix by a scalar
     */
    scale(a: Float32Array, scalar: number): Promise<Float32Array>;
    /**
     * Get backend statistics
     */
    getStats(): {
        status: GPUBackendStatus;
        capabilities: GPUCapabilities | null;
        bufferPool: {
            totalBuffers: number;
            inUseBuffers: number;
            cachedBuffers: number;
        } | null;
        shaders: {
            cachedShaders: number;
            cachedPipelines: number;
        } | null;
    };
    /**
     * Destroy the backend
     */
    destroy(): void;
}
/**
 * Get the global GPU backend
 */
declare function getGlobalGPUBackend(): GPUBackend;
/**
 * Initialize the global GPU backend
 */
declare function initializeGlobalGPUBackend(options?: GPUBackendOptions): Promise<boolean>;
/**
 * Destroy the global GPU backend
 */
declare function destroyGlobalGPUBackend(): void;
1940
+
1941
/**
 * GPU Matrix Backend Adapter
 *
 * Adapts GPUBackend to implement the MatrixBackend interface,
 * enabling seamless integration with the backend selection system.
 *
 * @packageDocumentation
 */

/**
 * Configuration for GPU Matrix Backend
 */
interface GPUMatrixBackendConfig {
    /** Minimum elements to use GPU (default: 65536 = 256x256) */
    minElements?: number;
    /** Use global GPU backend instance */
    useGlobalBackend?: boolean;
    /** GPU backend options */
    gpuOptions?: GPUBackendOptions;
    /** Fall back to JS on GPU errors */
    fallbackOnError?: boolean;
}
/**
 * GPU Matrix Backend
 *
 * Implements MatrixBackend interface using WebGPU compute shaders.
 * Provides significant acceleration for large matrices.
 *
 * @example
 * ```typescript
 * const gpu = new GPUMatrixBackend();
 * await gpu.initialize();
 *
 * const result = gpu.multiply(matrixA, matrixB);
 * ```
 */
declare class GPUMatrixBackend implements MatrixBackend {
    readonly type: BackendType;
    private config;
    private backend;
    private capabilities;
    private initPromise;
    private _available;
    constructor(config?: GPUMatrixBackendConfig);
    /**
     * Check if GPU is available in the current environment
     */
    isAvailable(): boolean;
    /**
     * Initialize the GPU backend
     */
    initialize(): Promise<void>;
    private doInitialize;
    /**
     * Check if operation should use GPU
     */
    private shouldUseGPU;
    /**
     * Execute GPU operation with fallback
     */
    private executeWithFallback;
    /**
     * Get GPU capabilities
     */
    getCapabilities(): GPUCapabilities | null;
    /**
     * Get backend statistics
     */
    getStats(): ReturnType<GPUBackend['getStats']> | null;
    /* Synchronous MatrixBackend operations (contracts defined on MatrixBackend). */
    add(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    /**
     * Async add operation using GPU
     */
    addAsync(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
    subtract(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    multiplyElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    divideElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    scale(a: DenseMatrix, scalar: number): DenseMatrix;
    /**
     * Async scale operation using GPU
     */
    scaleAsync(a: DenseMatrix, scalar: number): Promise<DenseMatrix>;
    abs(a: DenseMatrix): DenseMatrix;
    negate(a: DenseMatrix): DenseMatrix;
    multiply(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
    /**
     * Async matrix multiplication using GPU
     */
    multiplyAsync(a: DenseMatrix, b: DenseMatrix): Promise<DenseMatrix>;
    transpose(a: DenseMatrix): DenseMatrix;
    /**
     * Async transpose using GPU
     */
    transposeAsync(a: DenseMatrix): Promise<DenseMatrix>;
    sum(a: DenseMatrix): number;
    sumAxis(a: DenseMatrix, axis: 0 | 1): DenseMatrix;
    norm(a: DenseMatrix): number;
    dot(a: DenseMatrix, b: DenseMatrix): number;
    /**
     * Update configuration
     */
    updateConfig(config: Partial<GPUMatrixBackendConfig>): void;
    /**
     * Get current configuration
     */
    getConfig(): Required<GPUMatrixBackendConfig>;
    /**
     * Destroy the backend
     */
    destroy(): void;
}
/**
 * Global GPU matrix backend instance
 */
declare const gpuMatrixBackend: GPUMatrixBackend;
/**
 * Create a GPU matrix backend with custom configuration
 */
declare function createGPUMatrixBackend(config?: GPUMatrixBackendConfig): GPUMatrixBackend;
2060
+
2061
/**
 * Backend Manager
 *
 * Centralized management for matrix operation backends with automatic
 * selection based on matrix size, operation type, and availability.
 * Includes adaptive threshold tuning based on runtime profiling.
 *
 * @packageDocumentation
 */

/**
 * Operation type hints for backend selection
 */
type OperationType = 'add' | 'subtract' | 'multiply' | 'multiplyElementwise' | 'transpose' | 'scale' | 'decomposition' | 'solve';
/**
 * Extended backend hints with operation-specific thresholds
 */
interface ExtendedBackendHints extends BackendHints {
    /** Specific thresholds by operation type */
    operationThresholds?: Partial<Record<OperationType, {
        wasm?: number;
        gpu?: number;
    }>>;
    /** Enable automatic SIMD detection for WASM */
    autoSIMD?: boolean;
    /** Fallback to JS on backend failure */
    fallbackOnError?: boolean;
}
/**
 * Default extended hints
 */
declare const DEFAULT_EXTENDED_HINTS: Required<ExtendedBackendHints>;
2093
+ /**
2094
+ * Centralized Backend Manager
2095
+ *
2096
+ * Provides a unified interface for executing matrix operations with
2097
+ * automatic backend selection based on matrix size and operation type.
2098
+ * Features adaptive threshold tuning based on runtime profiling.
2099
+ */
2100
+ declare class BackendManager {
2101
+ private hints;
2102
+ private initialized;
2103
+ private initializationPromise;
2104
+ private adaptiveState;
2105
+ private configUnsubscribe;
2106
+ constructor(hints?: ExtendedBackendHints);
2107
+ /**
2108
+ * Sync manager state with global config
2109
+ */
2110
+ private syncWithConfig;
2111
+ /**
2112
+ * Initialize all available backends
2113
+ */
2114
+ initialize(): Promise<void>;
2115
+ private doInitialize;
2116
+ /**
2117
+ * Update backend hints
2118
+ */
2119
+ setHints(hints: ExtendedBackendHints): void;
2120
+ /**
2121
+ * Get current hints
2122
+ */
2123
+ getHints(): Required<ExtendedBackendHints>;
2124
+ /**
2125
+ * Get the best backend for a given operation and matrix size
2126
+ */
2127
+ selectBackend(elementCount: number, operation?: OperationType): MatrixBackend;
2128
+ /**
2129
+ * Execute an operation with automatic backend selection
2130
+ */
2131
+ private executeWithFallback;
2132
+ /**
2133
+ * Matrix addition with auto backend selection
2134
+ */
2135
+ add(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
2136
+ /**
2137
+ * Matrix subtraction with auto backend selection
2138
+ */
2139
+ subtract(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
2140
+ /**
2141
+ * Element-wise multiplication with auto backend selection
2142
+ */
2143
+ multiplyElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
2144
+ /**
2145
+ * Element-wise division with auto backend selection
2146
+ */
2147
+ divideElementwise(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
2148
+ /**
2149
+ * Scalar multiplication with auto backend selection
2150
+ */
2151
+ scale(a: DenseMatrix, scalar: number): DenseMatrix;
2152
+ /**
2153
+ * Element-wise absolute value with auto backend selection
2154
+ */
2155
+ abs(a: DenseMatrix): DenseMatrix;
2156
+ /**
2157
+ * Element-wise negation with auto backend selection
2158
+ */
2159
+ negate(a: DenseMatrix): DenseMatrix;
2160
+ /**
2161
+ * Matrix multiplication with auto backend selection
2162
+ */
2163
+ multiply(a: DenseMatrix, b: DenseMatrix): DenseMatrix;
2164
+ /**
2165
+ * Matrix transpose with auto backend selection
2166
+ */
2167
+ transpose(a: DenseMatrix): DenseMatrix;
2168
+ /**
2169
+ * Sum of all elements with auto backend selection
2170
+ */
2171
+ sum(a: DenseMatrix): Promise<number>;
2172
+ /**
2173
+ * Sum along axis with auto backend selection
2174
+ */
2175
+ sumAxis(a: DenseMatrix, axis: 0 | 1): DenseMatrix;
2176
+ /**
2177
+ * Frobenius norm with auto backend selection
2178
+ */
2179
+ norm(a: DenseMatrix): number;
2180
+ /**
2181
+ * Dot product with auto backend selection
2182
+ */
2183
+ dot(a: DenseMatrix, b: DenseMatrix): Promise<number>;
2184
+ /**
2185
+ * Get list of available backends
2186
+ */
2187
+ getAvailableBackends(): BackendType[];
2188
+ /**
2189
+ * Check if a specific backend is available
2190
+ */
2191
+ hasBackend(type: BackendType): boolean;
2192
+ /**
2193
+ * Get current active backend for a given operation size
2194
+ */
2195
+ getActiveBackend(elementCount: number, operation?: OperationType): BackendType;
2196
+ /**
2197
+ * Force a specific backend for all operations
2198
+ */
2199
+ forceBackend(type: BackendType | null): void;
2200
+ /**
2201
+ * Record a performance sample for adaptive tuning
2202
+ */
2203
+ recordSample(operation: OperationType, elementCount: number, backend: BackendType, durationMs: number): void;
2204
+ /**
2205
+ * Adjust thresholds based on collected samples
2206
+ */
2207
+ private maybeAdjustThresholds;
2208
+ /**
2209
+ * Get current adaptive thresholds
2210
+ */
2211
+ getAdaptiveThresholds(): Map<OperationType, {
2212
+ wasm: number;
2213
+ gpu: number;
2214
+ }>;
2215
+ /**
2216
+ * Reset adaptive tuning state
2217
+ */
2218
+ resetAdaptiveState(): void;
2219
+ /**
2220
+ * Get performance statistics
2221
+ */
2222
+ getPerformanceStats(): {
2223
+ sampleCount: number;
2224
+ operationStats: Map<OperationType, {
2225
+ avgDuration: number;
2226
+ samples: number;
2227
+ backendUsage: Record<BackendType, number>;
2228
+ }>;
2229
+ };
2230
+ /**
2231
+ * Cleanup resources
2232
+ */
2233
+ destroy(): void;
2234
+ }
2235
+ /**
2236
+ * Default backend manager instance
2237
+ */
2238
+ declare const backendManager: BackendManager;
2239
+ /**
2240
+ * Create a new backend manager with custom hints
2241
+ */
2242
+ declare function createBackendManager(hints?: ExtendedBackendHints): BackendManager;
2243
+
2244
+ /**
2245
+ * Eigenvalue and Eigenvector Decomposition
2246
+ *
2247
+ * Implements eigenvalue computation using QR algorithm and
2248
+ * eigenvector extraction using inverse iteration.
2249
+ */
2250
+ /**
2251
+ * Result of eigenvalue decomposition
2252
+ */
2253
+ interface EigResult {
2254
+ /** Eigenvalues (may be complex for non-symmetric matrices) */
2255
+ values: Array<{
2256
+ re: number;
2257
+ im: number;
2258
+ }>;
2259
+ /** Eigenvectors as columns (each column is an eigenvector) */
2260
+ vectors: number[][];
2261
+ /** Whether the matrix was symmetric */
2262
+ isSymmetric: boolean;
2263
+ }
2264
+ /**
2265
+ * Options for eigenvalue computation
2266
+ */
2267
+ interface EigOptions {
2268
+ /** Maximum number of QR iterations */
2269
+ maxIterations?: number;
2270
+ /** Convergence tolerance */
2271
+ tolerance?: number;
2272
+ /** Whether to compute eigenvectors */
2273
+ computeVectors?: boolean;
2274
+ }
2275
+ /**
2276
+ * Compute eigenvalues and eigenvectors of a square matrix
2277
+ * Uses QR algorithm with implicit shifts
2278
+ *
2279
+ * @param matrix - Square matrix (n x n)
2280
+ * @param options - Computation options
2281
+ * @returns Eigenvalues and eigenvectors
2282
+ */
2283
+ declare function eig(matrix: number[][] | Float64Array, options?: EigOptions): EigResult;
2284
+ /**
2285
+ * Compute only eigenvalues (faster, no eigenvector computation)
2286
+ */
2287
+ declare function eigvals(matrix: number[][] | Float64Array, options?: Omit<EigOptions, 'computeVectors'>): Array<{
2288
+ re: number;
2289
+ im: number;
2290
+ }>;
2291
+ /**
2292
+ * Power iteration for dominant eigenvalue
2293
+ * Faster than full eigendecomposition when only largest eigenvalue needed
2294
+ */
2295
+ declare function powerIteration(matrix: number[][], options?: {
2296
+ maxIterations?: number;
2297
+ tolerance?: number;
2298
+ }): {
2299
+ value: number;
2300
+ vector: number[];
2301
+ };
2302
+
2303
+ /**
2304
+ * Singular Value Decomposition (SVD)
2305
+ *
2306
+ * Implements SVD using Golub-Reinsch bidiagonalization and implicit QR.
2307
+ * A = U * S * V^T where:
2308
+ * - U: m x m orthogonal matrix (left singular vectors)
2309
+ * - S: m x n diagonal matrix (singular values)
2310
+ * - V: n x n orthogonal matrix (right singular vectors)
2311
+ */
2312
+ /**
2313
+ * Result of SVD decomposition
2314
+ */
2315
+ interface SVDResult {
2316
+ /** Left singular vectors (m x m) */
2317
+ U: number[][];
2318
+ /** Singular values (min(m,n) values) */
2319
+ S: number[];
2320
+ /** Right singular vectors (n x n) */
2321
+ V: number[][];
2322
+ /** Rank estimate */
2323
+ rank: number;
2324
+ }
2325
+ /**
2326
+ * Options for SVD computation
2327
+ */
2328
+ interface SVDOptions {
2329
+ /** Maximum number of iterations */
2330
+ maxIterations?: number;
2331
+ /** Convergence tolerance */
2332
+ tolerance?: number;
2333
+ /** Whether to compute full U and V matrices */
2334
+ fullMatrices?: boolean;
2335
+ /** Threshold for rank determination */
2336
+ rankTolerance?: number;
2337
+ }
2338
+ /**
2339
+ * Compute SVD of a matrix
2340
+ *
2341
+ * @param matrix - Input matrix (m x n)
2342
+ * @param options - Computation options
2343
+ * @returns SVD decomposition
2344
+ */
2345
+ declare function svd(matrix: number[][] | Float64Array, options?: SVDOptions): SVDResult;
2346
+ /**
2347
+ * Compute only singular values (faster than full SVD)
2348
+ */
2349
+ declare function singularValues(matrix: number[][] | Float64Array, options?: Omit<SVDOptions, 'fullMatrices'>): number[];
2350
+ /**
2351
+ * Compute the pseudoinverse (Moore-Penrose inverse) using SVD
2352
+ */
2353
+ declare function pinv(matrix: number[][], options?: SVDOptions): number[][];
2354
+ /**
2355
+ * Low-rank approximation using SVD
2356
+ * Keeps only the top r singular values
2357
+ */
2358
+ declare function lowRankApprox(matrix: number[][], r: number, options?: SVDOptions): number[][];
2359
+ /**
2360
+ * Compute condition number using SVD
2361
+ */
2362
+ declare function cond(matrix: number[][], options?: SVDOptions): number;
2363
+ /**
2364
+ * Compute matrix norm using SVD
2365
+ * Returns the spectral norm (largest singular value)
2366
+ */
2367
+ declare function norm2(matrix: number[][], options?: SVDOptions): number;
2368
+ /**
2369
+ * Compute Frobenius norm using SVD
2370
+ * Equals sqrt(sum of squared singular values)
2371
+ */
2372
+ declare function normFro(matrix: number[][]): number;
2373
+
2374
+ /**
2375
+ * WASM-accelerated Eigendecomposition
2376
+ *
2377
+ * Provides eigenvalue/eigenvector computation with optional WASM acceleration
2378
+ * via the Rust-compiled Jacobi eigenvalue algorithm. Falls back to the pure
2379
+ * JavaScript QR-based implementation when WASM is unavailable.
2380
+ *
2381
+ * WASM acceleration path:
2382
+ * - Symmetric matrices: Jacobi eigenvalue algorithm (Rust WASM)
2383
+ * - Non-symmetric matrices: falls back to JS QR algorithm
2384
+ *
2385
+ * JS fallback path:
2386
+ * - Full QR algorithm with implicit shifts (eig.ts)
2387
+ *
2388
+ * @packageDocumentation
2389
+ */
2390
+
2391
+ /**
2392
+ * WASM-accelerated eigendecomposition for symmetric matrices.
2393
+ *
2394
+ * Uses the Rust Jacobi eigenvalue algorithm when WASM is loaded,
2395
+ * otherwise falls back to the JavaScript QR algorithm.
2396
+ *
2397
+ * @param matrix - Square matrix as 2D array
2398
+ * @param options - Computation options
2399
+ * @returns Eigenvalues and eigenvectors
2400
+ */
2401
+ declare function eigWasm(matrix: number[][], options?: EigOptions): Promise<EigResult>;
2402
+ /**
2403
+ * WASM-accelerated eigenvalues only (no eigenvectors).
2404
+ * Faster than full eigWasm when only eigenvalues are needed.
2405
+ *
2406
+ * @param matrix - Square matrix as 2D array
2407
+ * @param options - Computation options (computeVectors is ignored)
2408
+ * @returns Array of eigenvalues
2409
+ */
2410
+ declare function eigvalsWasm(matrix: number[][], options?: Omit<EigOptions, 'computeVectors'>): Promise<Array<{
2411
+ re: number;
2412
+ im: number;
2413
+ }>>;
2414
+ /**
2415
+ * WASM-accelerated spectral radius.
2416
+ * Uses Rust power iteration when WASM is available.
2417
+ *
2418
+ * @param matrix - Square matrix as 2D array
2419
+ * @param options - Iteration options
2420
+ * @returns Spectral radius (absolute value of largest eigenvalue)
2421
+ */
2422
+ declare function spectralRadiusWasm(matrix: number[][], options?: {
2423
+ maxIterations?: number;
2424
+ tolerance?: number;
2425
+ }): Promise<number>;
2426
+
2427
+ /**
2428
+ * WASM-accelerated Singular Value Decomposition
2429
+ *
2430
+ * Provides SVD computation with optional WASM acceleration.
2431
+ * The Rust WASM crate currently exposes eigenvalue operations but not
2432
+ * a direct SVD implementation. For symmetric matrices, SVD can be
2433
+ * derived from eigendecomposition (singular values = |eigenvalues|).
2434
+ *
2435
+ * Strategy:
2436
+ * - Symmetric matrices: WASM eig -> derive SVD
2437
+ * - General matrices: JS Golub-Reinsch bidiagonalization (svd.ts)
2438
+ *
2439
+ * @packageDocumentation
2440
+ */
2441
+
2442
+ /**
2443
+ * WASM-accelerated SVD.
2444
+ *
2445
+ * For symmetric matrices, uses WASM eigendecomposition to derive SVD.
2446
+ * For general matrices, falls back to the JavaScript Golub-Reinsch algorithm.
2447
+ *
2448
+ * @param matrix - Input matrix (m x n) as 2D array
2449
+ * @param options - SVD computation options
2450
+ * @returns SVD decomposition { U, S, V, rank }
2451
+ */
2452
+ declare function svdWasm(matrix: number[][], options?: SVDOptions): Promise<SVDResult>;
2453
+
2454
+ /**
2455
+ * Typed Matrix Operations
2456
+ *
2457
+ * Polymorphic matrix operations using typed-function for runtime dispatch.
2458
+ * Supports operations on DenseMatrix with automatic type coercion.
2459
+ *
2460
+ * @packageDocumentation
2461
+ */
2462
+ /**
2463
+ * Create a matrix from various input types
2464
+ */
2465
+ declare const matrix: typed_function.TypedFunction;
2466
+ /**
2467
+ * Create an identity matrix
2468
+ */
2469
+ declare const identity: typed_function.TypedFunction;
2470
+ /**
2471
+ * Create a matrix of zeros
2472
+ */
2473
+ declare const zeros: typed_function.TypedFunction;
2474
+ /**
2475
+ * Create a matrix of ones
2476
+ */
2477
+ declare const ones: typed_function.TypedFunction;
2478
+ /**
2479
+ * Create a diagonal matrix
2480
+ */
2481
+ declare const diag: typed_function.TypedFunction;
2482
+ /**
2483
+ * Create a random matrix
2484
+ */
2485
+ declare const random: typed_function.TypedFunction;
2486
+ /**
2487
+ * Matrix addition - polymorphic add function
2488
+ */
2489
+ declare const add: typed_function.TypedFunction;
2490
+ /**
2491
+ * Matrix subtraction
2492
+ */
2493
+ declare const subtract: typed_function.TypedFunction;
2494
+ /**
2495
+ * Matrix multiplication (matmul)
2496
+ */
2497
+ declare const multiply: typed_function.TypedFunction;
2498
+ /**
2499
+ * Element-wise multiplication (Hadamard product)
2500
+ */
2501
+ declare const dotMultiply: typed_function.TypedFunction;
2502
+ /**
2503
+ * Matrix division (A / B where B is scalar or element-wise)
2504
+ */
2505
+ declare const divide: typed_function.TypedFunction;
2506
+ /**
2507
+ * Matrix negation
2508
+ */
2509
+ declare const unaryMinus: typed_function.TypedFunction;
2510
+ /**
2511
+ * Matrix transpose
2512
+ */
2513
+ declare const transpose: typed_function.TypedFunction;
2514
+ /**
2515
+ * Sum of all elements
2516
+ */
2517
+ declare const sum: typed_function.TypedFunction;
2518
+ /**
2519
+ * Mean of all elements
2520
+ */
2521
+ declare const mean: typed_function.TypedFunction;
2522
+ /**
2523
+ * Minimum element
2524
+ */
2525
+ declare const min: typed_function.TypedFunction;
2526
+ /**
2527
+ * Maximum element
2528
+ */
2529
+ declare const max: typed_function.TypedFunction;
2530
+ /**
2531
+ * Frobenius norm
2532
+ */
2533
+ declare const norm: typed_function.TypedFunction;
2534
+ /**
2535
+ * Matrix trace (sum of diagonal elements)
2536
+ */
2537
+ declare const trace: typed_function.TypedFunction;
2538
+ /**
2539
+ * Element-wise absolute value
2540
+ */
2541
+ declare const abs: typed_function.TypedFunction;
2542
+ /**
2543
+ * Element-wise square root
2544
+ */
2545
+ declare const sqrt: typed_function.TypedFunction;
2546
+ /**
2547
+ * Element-wise square
2548
+ */
2549
+ declare const square: typed_function.TypedFunction;
2550
+ /**
2551
+ * Element-wise exponential
2552
+ */
2553
+ declare const exp: typed_function.TypedFunction;
2554
+ /**
2555
+ * Element-wise natural logarithm
2556
+ */
2557
+ declare const log: typed_function.TypedFunction;
2558
+ /**
2559
+ * Element-wise power
2560
+ */
2561
+ declare const pow: typed_function.TypedFunction;
2562
+ /**
2563
+ * Get matrix dimensions
2564
+ */
2565
+ declare const size: typed_function.TypedFunction;
2566
+ /**
2567
+ * Get element at position
2568
+ */
2569
+ declare const subset: typed_function.TypedFunction;
2570
+ /**
2571
+ * Get row from matrix
2572
+ */
2573
+ declare const row: typed_function.TypedFunction;
2574
+ /**
2575
+ * Get column from matrix
2576
+ */
2577
+ declare const column: typed_function.TypedFunction;
2578
+ /**
2579
+ * Get diagonal from matrix
2580
+ */
2581
+ declare const diagonal: typed_function.TypedFunction;
2582
+ declare const typedMatrixOperations: {
2583
+ matrix: typed_function.TypedFunction;
2584
+ identity: typed_function.TypedFunction;
2585
+ zeros: typed_function.TypedFunction;
2586
+ ones: typed_function.TypedFunction;
2587
+ diag: typed_function.TypedFunction;
2588
+ random: typed_function.TypedFunction;
2589
+ add: typed_function.TypedFunction;
2590
+ subtract: typed_function.TypedFunction;
2591
+ multiply: typed_function.TypedFunction;
2592
+ dotMultiply: typed_function.TypedFunction;
2593
+ divide: typed_function.TypedFunction;
2594
+ unaryMinus: typed_function.TypedFunction;
2595
+ transpose: typed_function.TypedFunction;
2596
+ sum: typed_function.TypedFunction;
2597
+ mean: typed_function.TypedFunction;
2598
+ min: typed_function.TypedFunction;
2599
+ max: typed_function.TypedFunction;
2600
+ norm: typed_function.TypedFunction;
2601
+ trace: typed_function.TypedFunction;
2602
+ abs: typed_function.TypedFunction;
2603
+ sqrt: typed_function.TypedFunction;
2604
+ square: typed_function.TypedFunction;
2605
+ exp: typed_function.TypedFunction;
2606
+ log: typed_function.TypedFunction;
2607
+ pow: typed_function.TypedFunction;
2608
+ size: typed_function.TypedFunction;
2609
+ subset: typed_function.TypedFunction;
2610
+ row: typed_function.TypedFunction;
2611
+ column: typed_function.TypedFunction;
2612
+ diagonal: typed_function.TypedFunction;
2613
+ };
2614
+
2615
+ /**
2616
+ * Parallel-First Matrix Operations
2617
+ *
2618
+ * AssemblyScript-friendly TypeScript implementations with typed-function
2619
+ * integration and workerpool parallel execution via @danielsimonjr/mathts-parallel.
2620
+ *
2621
+ * These operations use Float64Array flat row-major format for efficient
2622
+ * parallel processing through the ComputePool worker infrastructure.
2623
+ *
2624
+ * Following the parallel-first philosophy per CLAUDE.md:
2625
+ * - Use workers for ALL matrix operations (matmul, transpose, etc.)
2626
+ * - Use workers for ALL element-wise operations on matrices
2627
+ * - Only fall back to sequential for trivial scalar operations
2628
+ *
2629
+ * @packageDocumentation
2630
+ */
2631
+ /**
2632
+ * Create a matrix from various input types - parallel-first
2633
+ */
2634
+ declare const parallelMatrix: typed_function.TypedFunction;
2635
+ /**
2636
+ * Create an identity matrix
2637
+ */
2638
+ declare const parallelIdentity: typed_function.TypedFunction;
2639
+ /**
2640
+ * Create a matrix of zeros
2641
+ */
2642
+ declare const parallelZeros: typed_function.TypedFunction;
2643
+ /**
2644
+ * Create a matrix of ones
2645
+ */
2646
+ declare const parallelOnes: typed_function.TypedFunction;
2647
+ /**
2648
+ * Create a diagonal matrix
2649
+ */
2650
+ declare const parallelDiag: typed_function.TypedFunction;
2651
+ /**
2652
+ * Create a random matrix
2653
+ */
2654
+ declare const parallelRandom: typed_function.TypedFunction;
2655
+ /**
2656
+ * Parallel matrix addition with typed-function dispatch
2657
+ */
2658
+ declare const parallelMatrixAdd: typed_function.TypedFunction;
2659
+ /**
2660
+ * Parallel matrix subtraction with typed-function dispatch
2661
+ */
2662
+ declare const parallelMatrixSubtract: typed_function.TypedFunction;
2663
+ /**
2664
+ * Parallel matrix multiplication (matmul) with typed-function dispatch
2665
+ */
2666
+ declare const parallelMatrixMultiply: typed_function.TypedFunction;
2667
+ /**
2668
+ * Element-wise multiplication (Hadamard product) - parallel execution
2669
+ */
2670
+ declare const parallelDotMultiply: typed_function.TypedFunction;
2671
+ /**
2672
+ * Parallel matrix division
2673
+ */
2674
+ declare const parallelMatrixDivide: typed_function.TypedFunction;
2675
+ /**
2676
+ * Matrix negation - parallel execution
2677
+ */
2678
+ declare const parallelUnaryMinus: typed_function.TypedFunction;
2679
+ /**
2680
+ * Matrix transpose - parallel execution
2681
+ */
2682
+ declare const parallelMatrixTranspose: typed_function.TypedFunction;
2683
+ /**
2684
+ * Parallel sum of all elements
2685
+ */
2686
+ declare const parallelMatrixSum: typed_function.TypedFunction;
2687
+ /**
2688
+ * Parallel mean of all elements
2689
+ */
2690
+ declare const parallelMatrixMean: typed_function.TypedFunction;
2691
+ /**
2692
+ * Parallel minimum element
2693
+ */
2694
+ declare const parallelMatrixMin: typed_function.TypedFunction;
2695
+ /**
2696
+ * Parallel maximum element
2697
+ */
2698
+ declare const parallelMatrixMax: typed_function.TypedFunction;
2699
+ /**
2700
+ * Parallel variance computation
2701
+ */
2702
+ declare const parallelMatrixVariance: typed_function.TypedFunction;
2703
+ /**
2704
+ * Parallel standard deviation
2705
+ */
2706
+ declare const parallelMatrixStd: typed_function.TypedFunction;
2707
+ /**
2708
+ * Parallel Frobenius norm
2709
+ */
2710
+ declare const parallelMatrixNorm: typed_function.TypedFunction;
2711
+ /**
2712
+ * Parallel dot product
2713
+ */
2714
+ declare const parallelMatrixDot: typed_function.TypedFunction;
2715
+ /**
2716
+ * Matrix trace (sum of diagonal elements)
2717
+ */
2718
+ declare const parallelMatrixTrace: typed_function.TypedFunction;
2719
+ /**
2720
+ * Parallel Euclidean distance
2721
+ */
2722
+ declare const parallelMatrixDistance: typed_function.TypedFunction;
2723
+ /**
2724
+ * Parallel element-wise absolute value
2725
+ */
2726
+ declare const parallelMatrixAbs: typed_function.TypedFunction;
2727
+ /**
2728
+ * Parallel element-wise square root
2729
+ */
2730
+ declare const parallelMatrixSqrt: typed_function.TypedFunction;
2731
+ /**
2732
+ * Parallel element-wise square
2733
+ */
2734
+ declare const parallelMatrixSquare: typed_function.TypedFunction;
2735
+ /**
2736
+ * Parallel element-wise exponential
2737
+ */
2738
+ declare const parallelMatrixExp: typed_function.TypedFunction;
2739
+ /**
2740
+ * Parallel element-wise natural logarithm
2741
+ */
2742
+ declare const parallelMatrixLog: typed_function.TypedFunction;
2743
+ /**
2744
+ * Parallel element-wise sine
2745
+ */
2746
+ declare const parallelMatrixSin: typed_function.TypedFunction;
2747
+ /**
2748
+ * Parallel element-wise cosine
2749
+ */
2750
+ declare const parallelMatrixCos: typed_function.TypedFunction;
2751
+ /**
2752
+ * Parallel element-wise tangent
2753
+ */
2754
+ declare const parallelMatrixTan: typed_function.TypedFunction;
2755
+ /**
2756
+ * Get matrix dimensions
2757
+ */
2758
+ declare const parallelMatrixSize: typed_function.TypedFunction;
2759
+ /**
2760
+ * Get element at position
2761
+ */
2762
+ declare const parallelMatrixSubset: typed_function.TypedFunction;
2763
+ /**
2764
+ * Get row from matrix
2765
+ */
2766
+ declare const parallelMatrixRow: typed_function.TypedFunction;
2767
+ /**
2768
+ * Get column from matrix
2769
+ */
2770
+ declare const parallelMatrixColumn: typed_function.TypedFunction;
2771
+ /**
2772
+ * Get diagonal from matrix
2773
+ */
2774
+ declare const parallelMatrixDiagonal: typed_function.TypedFunction;
2775
+ /**
2776
+ * Parallel matrix-vector multiplication
2777
+ */
2778
+ declare const parallelMatrixMatvec: typed_function.TypedFunction;
2779
+ /**
2780
+ * Parallel outer product
2781
+ */
2782
+ declare const parallelMatrixOuter: typed_function.TypedFunction;
2783
+ /**
2784
+ * Parallel histogram computation
2785
+ */
2786
+ declare const parallelMatrixHistogram: typed_function.TypedFunction;
2787
+ declare const parallelMatrixOperations: {
2788
+ matrix: typed_function.TypedFunction;
2789
+ identity: typed_function.TypedFunction;
2790
+ zeros: typed_function.TypedFunction;
2791
+ ones: typed_function.TypedFunction;
2792
+ diag: typed_function.TypedFunction;
2793
+ random: typed_function.TypedFunction;
2794
+ add: typed_function.TypedFunction;
2795
+ subtract: typed_function.TypedFunction;
2796
+ multiply: typed_function.TypedFunction;
2797
+ dotMultiply: typed_function.TypedFunction;
2798
+ divide: typed_function.TypedFunction;
2799
+ unaryMinus: typed_function.TypedFunction;
2800
+ transpose: typed_function.TypedFunction;
2801
+ sum: typed_function.TypedFunction;
2802
+ mean: typed_function.TypedFunction;
2803
+ min: typed_function.TypedFunction;
2804
+ max: typed_function.TypedFunction;
2805
+ variance: typed_function.TypedFunction;
2806
+ std: typed_function.TypedFunction;
2807
+ norm: typed_function.TypedFunction;
2808
+ dot: typed_function.TypedFunction;
2809
+ trace: typed_function.TypedFunction;
2810
+ distance: typed_function.TypedFunction;
2811
+ abs: typed_function.TypedFunction;
2812
+ sqrt: typed_function.TypedFunction;
2813
+ square: typed_function.TypedFunction;
2814
+ exp: typed_function.TypedFunction;
2815
+ log: typed_function.TypedFunction;
2816
+ sin: typed_function.TypedFunction;
2817
+ cos: typed_function.TypedFunction;
2818
+ tan: typed_function.TypedFunction;
2819
+ size: typed_function.TypedFunction;
2820
+ subset: typed_function.TypedFunction;
2821
+ row: typed_function.TypedFunction;
2822
+ column: typed_function.TypedFunction;
2823
+ diagonal: typed_function.TypedFunction;
2824
+ matvec: typed_function.TypedFunction;
2825
+ outer: typed_function.TypedFunction;
2826
+ histogram: typed_function.TypedFunction;
2827
+ };
2828
+ /**
2829
+ * Initialize parallel matrix operations
2830
+ */
2831
+ declare function initializeParallelMatrix(): Promise<void>;
2832
+ /**
2833
+ * Terminate parallel matrix operations pool
2834
+ */
2835
+ declare function terminateParallelMatrix(): Promise<void>;
2836
+
2837
+ export { BUILTIN_SHADERS, type BackendHints, BackendManager, BackendRegistry, type BackendType, BatchExecutor, BufferPool, DEFAULT_BACKEND_HINTS, DEFAULT_EXTENDED_HINTS, DenseMatrix, type EigOptions, type EigResult, type ExtendedBackendHints, GPUBackend, type GPUBackendOptions, type GPUBackendStatus, type GPUCapabilities, GPUContext, type GPUContextOptions, GPUMatrixBackend, type GPUMatrixBackendConfig, JSBackend, Matrix, type MatrixBackend, type MatrixDimensions, type MatrixEntry, type MatrixIndex, type MatrixType, type OperationType, ParallelBackend, type ParallelBackendConfig, type SVDOptions, type SVDResult, ShaderManager, type SliceSpec, SparseMatrix, type SyncConfig, SyncManager, type SyncStrategy, WASMBackend, type WASMBackendConfig, type WasmFeatures, abs, add, backendManager, backendRegistry, clearFeatureCache, column, cond, createBackendManager, createGPUMatrixBackend, createParallelBackend, createSyncManager, createWASMBackend, destroyGlobalGPU, destroyGlobalGPUBackend, detectGPUCapabilities, detectWasmFeatures, diag, diagonal, divide, dotMultiply, eig, eigWasm, eigvals, eigvalsWasm, exp, getCachedFeatures, getGlobalGPUBackend, getGlobalGPUContext, getRecommendedWorkgroupSize, gpuMatrixBackend, hasWebGPU, identity, initializeGlobalGPUBackend, initializeParallelMatrix, isAtomicsAvailable, isDenseMatrix, isMatrix, isSharedMemoryAvailable, isSparseMatrix, isWasmAvailable, jsBackend, log, lowRankApprox, matrix, max, mean, min, multiply, norm, norm2, normFro, ones, parallelBackend, parallelDiag, parallelDotMultiply, parallelIdentity, parallelMatrix, parallelMatrixAbs, parallelMatrixAdd, parallelMatrixColumn, parallelMatrixCos, parallelMatrixDiagonal, parallelMatrixDistance, parallelMatrixDivide, parallelMatrixDot, parallelMatrixExp, parallelMatrixHistogram, parallelMatrixLog, parallelMatrixMatvec, parallelMatrixMax, parallelMatrixMean, parallelMatrixMin, parallelMatrixMultiply, parallelMatrixNorm, parallelMatrixOperations, parallelMatrixOuter, 
parallelMatrixRow, parallelMatrixSin, parallelMatrixSize, parallelMatrixSqrt, parallelMatrixSquare, parallelMatrixStd, parallelMatrixSubset, parallelMatrixSubtract, parallelMatrixSum, parallelMatrixTan, parallelMatrixTrace, parallelMatrixTranspose, parallelMatrixVariance, parallelOnes, parallelRandom, parallelUnaryMinus, parallelZeros, pinv, pow, powerIteration, random, row, singularValues, size, spectralRadiusWasm, sqrt, square, subset, subtract, sum, svd, svdWasm, terminateParallelMatrix, trace, transpose, typedMatrixOperations, unaryMinus, wasmBackend, zeros };