@pawells/math-extended 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +319 -0
- package/build/angles.d.ts +31 -0
- package/build/angles.d.ts.map +1 -0
- package/build/angles.js +85 -0
- package/build/angles.js.map +1 -0
- package/build/angles.spec.d.ts +2 -0
- package/build/angles.spec.d.ts.map +1 -0
- package/build/angles.spec.js +147 -0
- package/build/angles.spec.js.map +1 -0
- package/build/clamp.d.ts +17 -0
- package/build/clamp.d.ts.map +1 -0
- package/build/clamp.js +19 -0
- package/build/clamp.js.map +1 -0
- package/build/clamp.spec.d.ts +2 -0
- package/build/clamp.spec.d.ts.map +1 -0
- package/build/clamp.spec.js +19 -0
- package/build/clamp.spec.js.map +1 -0
- package/build/documentation-validation.spec.d.ts +11 -0
- package/build/documentation-validation.spec.d.ts.map +1 -0
- package/build/documentation-validation.spec.js +401 -0
- package/build/documentation-validation.spec.js.map +1 -0
- package/build/index.d.ts +8 -0
- package/build/index.d.ts.map +1 -0
- package/build/index.js +8 -0
- package/build/index.js.map +1 -0
- package/build/interpolation.d.ts +175 -0
- package/build/interpolation.d.ts.map +1 -0
- package/build/interpolation.js +369 -0
- package/build/interpolation.js.map +1 -0
- package/build/interpolation.spec.d.ts +2 -0
- package/build/interpolation.spec.d.ts.map +1 -0
- package/build/interpolation.spec.js +480 -0
- package/build/interpolation.spec.js.map +1 -0
- package/build/matrices/arithmetic.d.ts +411 -0
- package/build/matrices/arithmetic.d.ts.map +1 -0
- package/build/matrices/arithmetic.js +954 -0
- package/build/matrices/arithmetic.js.map +1 -0
- package/build/matrices/arithmetic.spec.d.ts +2 -0
- package/build/matrices/arithmetic.spec.d.ts.map +1 -0
- package/build/matrices/arithmetic.spec.js +915 -0
- package/build/matrices/arithmetic.spec.js.map +1 -0
- package/build/matrices/asserts.d.ts +306 -0
- package/build/matrices/asserts.d.ts.map +1 -0
- package/build/matrices/asserts.js +396 -0
- package/build/matrices/asserts.js.map +1 -0
- package/build/matrices/asserts.spec.d.ts +2 -0
- package/build/matrices/asserts.spec.d.ts.map +1 -0
- package/build/matrices/asserts.spec.js +565 -0
- package/build/matrices/asserts.spec.js.map +1 -0
- package/build/matrices/core.d.ts +168 -0
- package/build/matrices/core.d.ts.map +1 -0
- package/build/matrices/core.js +457 -0
- package/build/matrices/core.js.map +1 -0
- package/build/matrices/core.spec.d.ts +2 -0
- package/build/matrices/core.spec.d.ts.map +1 -0
- package/build/matrices/core.spec.js +634 -0
- package/build/matrices/core.spec.js.map +1 -0
- package/build/matrices/decompositions.d.ts +326 -0
- package/build/matrices/decompositions.d.ts.map +1 -0
- package/build/matrices/decompositions.js +816 -0
- package/build/matrices/decompositions.js.map +1 -0
- package/build/matrices/decompositions.spec.d.ts +2 -0
- package/build/matrices/decompositions.spec.d.ts.map +1 -0
- package/build/matrices/decompositions.spec.js +195 -0
- package/build/matrices/decompositions.spec.js.map +1 -0
- package/build/matrices/index.d.ts +9 -0
- package/build/matrices/index.d.ts.map +1 -0
- package/build/matrices/index.js +9 -0
- package/build/matrices/index.js.map +1 -0
- package/build/matrices/linear-algebra.d.ts +64 -0
- package/build/matrices/linear-algebra.d.ts.map +1 -0
- package/build/matrices/linear-algebra.js +253 -0
- package/build/matrices/linear-algebra.js.map +1 -0
- package/build/matrices/linear-algebra.spec.d.ts +2 -0
- package/build/matrices/linear-algebra.spec.d.ts.map +1 -0
- package/build/matrices/linear-algebra.spec.js +355 -0
- package/build/matrices/linear-algebra.spec.js.map +1 -0
- package/build/matrices/normalization.d.ts +62 -0
- package/build/matrices/normalization.d.ts.map +1 -0
- package/build/matrices/normalization.js +167 -0
- package/build/matrices/normalization.js.map +1 -0
- package/build/matrices/normalization.spec.d.ts +2 -0
- package/build/matrices/normalization.spec.d.ts.map +1 -0
- package/build/matrices/normalization.spec.js +335 -0
- package/build/matrices/normalization.spec.js.map +1 -0
- package/build/matrices/transformations.d.ts +484 -0
- package/build/matrices/transformations.d.ts.map +1 -0
- package/build/matrices/transformations.js +592 -0
- package/build/matrices/transformations.js.map +1 -0
- package/build/matrices/transformations.spec.d.ts +2 -0
- package/build/matrices/transformations.spec.d.ts.map +1 -0
- package/build/matrices/transformations.spec.js +755 -0
- package/build/matrices/transformations.spec.js.map +1 -0
- package/build/matrices/types.d.ts +134 -0
- package/build/matrices/types.d.ts.map +1 -0
- package/build/matrices/types.js +6 -0
- package/build/matrices/types.js.map +1 -0
- package/build/quaternions/asserts.d.ts +77 -0
- package/build/quaternions/asserts.d.ts.map +1 -0
- package/build/quaternions/asserts.js +175 -0
- package/build/quaternions/asserts.js.map +1 -0
- package/build/quaternions/asserts.spec.d.ts +2 -0
- package/build/quaternions/asserts.spec.d.ts.map +1 -0
- package/build/quaternions/asserts.spec.js +320 -0
- package/build/quaternions/asserts.spec.js.map +1 -0
- package/build/quaternions/conversions.d.ts +73 -0
- package/build/quaternions/conversions.d.ts.map +1 -0
- package/build/quaternions/conversions.js +179 -0
- package/build/quaternions/conversions.js.map +1 -0
- package/build/quaternions/conversions.spec.d.ts +2 -0
- package/build/quaternions/conversions.spec.d.ts.map +1 -0
- package/build/quaternions/conversions.spec.js +344 -0
- package/build/quaternions/conversions.spec.js.map +1 -0
- package/build/quaternions/core.d.ts +203 -0
- package/build/quaternions/core.d.ts.map +1 -0
- package/build/quaternions/core.js +374 -0
- package/build/quaternions/core.js.map +1 -0
- package/build/quaternions/core.spec.d.ts +2 -0
- package/build/quaternions/core.spec.d.ts.map +1 -0
- package/build/quaternions/core.spec.js +294 -0
- package/build/quaternions/core.spec.js.map +1 -0
- package/build/quaternions/index.d.ts +7 -0
- package/build/quaternions/index.d.ts.map +1 -0
- package/build/quaternions/index.js +7 -0
- package/build/quaternions/index.js.map +1 -0
- package/build/quaternions/interpolation.d.ts +54 -0
- package/build/quaternions/interpolation.d.ts.map +1 -0
- package/build/quaternions/interpolation.js +201 -0
- package/build/quaternions/interpolation.js.map +1 -0
- package/build/quaternions/interpolation.spec.d.ts +2 -0
- package/build/quaternions/interpolation.spec.d.ts.map +1 -0
- package/build/quaternions/interpolation.spec.js +64 -0
- package/build/quaternions/interpolation.spec.js.map +1 -0
- package/build/quaternions/predefined.d.ts +36 -0
- package/build/quaternions/predefined.d.ts.map +1 -0
- package/build/quaternions/predefined.js +42 -0
- package/build/quaternions/predefined.js.map +1 -0
- package/build/quaternions/predefined.spec.d.ts +2 -0
- package/build/quaternions/predefined.spec.d.ts.map +1 -0
- package/build/quaternions/predefined.spec.js +35 -0
- package/build/quaternions/predefined.spec.js.map +1 -0
- package/build/quaternions/types.d.ts +55 -0
- package/build/quaternions/types.d.ts.map +1 -0
- package/build/quaternions/types.js +7 -0
- package/build/quaternions/types.js.map +1 -0
- package/build/random.d.ts +66 -0
- package/build/random.d.ts.map +1 -0
- package/build/random.js +115 -0
- package/build/random.js.map +1 -0
- package/build/random.spec.d.ts +2 -0
- package/build/random.spec.d.ts.map +1 -0
- package/build/random.spec.js +267 -0
- package/build/random.spec.js.map +1 -0
- package/build/vectors/asserts.d.ts +182 -0
- package/build/vectors/asserts.d.ts.map +1 -0
- package/build/vectors/asserts.js +285 -0
- package/build/vectors/asserts.js.map +1 -0
- package/build/vectors/asserts.spec.d.ts +2 -0
- package/build/vectors/asserts.spec.d.ts.map +1 -0
- package/build/vectors/asserts.spec.js +260 -0
- package/build/vectors/asserts.spec.js.map +1 -0
- package/build/vectors/core.d.ts +507 -0
- package/build/vectors/core.d.ts.map +1 -0
- package/build/vectors/core.js +825 -0
- package/build/vectors/core.js.map +1 -0
- package/build/vectors/core.spec.d.ts +2 -0
- package/build/vectors/core.spec.d.ts.map +1 -0
- package/build/vectors/core.spec.js +343 -0
- package/build/vectors/core.spec.js.map +1 -0
- package/build/vectors/index.d.ts +6 -0
- package/build/vectors/index.d.ts.map +1 -0
- package/build/vectors/index.js +6 -0
- package/build/vectors/index.js.map +1 -0
- package/build/vectors/interpolation.d.ts +404 -0
- package/build/vectors/interpolation.d.ts.map +1 -0
- package/build/vectors/interpolation.js +585 -0
- package/build/vectors/interpolation.js.map +1 -0
- package/build/vectors/interpolation.spec.d.ts +2 -0
- package/build/vectors/interpolation.spec.d.ts.map +1 -0
- package/build/vectors/interpolation.spec.js +378 -0
- package/build/vectors/interpolation.spec.js.map +1 -0
- package/build/vectors/predefined.d.ts +191 -0
- package/build/vectors/predefined.d.ts.map +1 -0
- package/build/vectors/predefined.js +191 -0
- package/build/vectors/predefined.js.map +1 -0
- package/build/vectors/predefined.spec.d.ts +2 -0
- package/build/vectors/predefined.spec.d.ts.map +1 -0
- package/build/vectors/predefined.spec.js +333 -0
- package/build/vectors/predefined.spec.js.map +1 -0
- package/build/vectors/types.d.ts +62 -0
- package/build/vectors/types.d.ts.map +1 -0
- package/build/vectors/types.js +6 -0
- package/build/vectors/types.js.map +1 -0
- package/package.json +75 -0
|
@@ -0,0 +1,816 @@
|
|
|
1
|
+
import { AssertNumber, AssertInstanceOf } from '@pawells/typescript-common';
|
|
2
|
+
import { MatrixMultiply } from './arithmetic.js';
|
|
3
|
+
import { AssertMatrix, AssertMatrixRow, AssertMatrixValue, AssertMatrix1, AssertMatrix2, MatrixError } from './asserts.js';
|
|
4
|
+
import { MatrixSize, MatrixCreate, MatrixClone, MatrixIdentity, MatrixTranspose } from './core.js';
|
|
5
|
+
import { MatrixGramSchmidt } from './linear-algebra.js';
|
|
6
|
+
const MATRIX_NUMERICAL_TOLERANCE = 1e-12;
|
|
7
|
+
const EIGEN_CONVERGENCE_TOLERANCE = 1e-10;
|
|
8
|
+
/**
|
|
9
|
+
* Performs Cholesky decomposition for symmetric positive definite matrices A = L × L^T.
|
|
10
|
+
*
|
|
11
|
+
* Cholesky decomposition is a specialized form of LU decomposition that takes advantage
|
|
12
|
+
* of the symmetry and positive definiteness of the input matrix. It's approximately twice
|
|
13
|
+
* as efficient as general LU decomposition for matrices that meet these conditions.
|
|
14
|
+
*
|
|
15
|
+
* **Mathematical Background:**
|
|
16
|
+
* For a symmetric positive definite matrix A, there exists a unique lower triangular matrix L
|
|
17
|
+
* with positive diagonal entries such that A = L × L^T. This decomposition is useful for:
|
|
18
|
+
* - Solving linear systems Ax = b efficiently
|
|
19
|
+
* - Computing matrix determinants (det(A) = (det(L))²)
|
|
20
|
+
* - Matrix inversion and computing quadratic forms
|
|
21
|
+
* - Numerical stability in optimization algorithms
|
|
22
|
+
*
|
|
23
|
+
* @param matrix - The symmetric positive definite square matrix to decompose
|
|
24
|
+
* @returns Lower triangular matrix L such that A = L × L^T
|
|
25
|
+
* @throws {Error} If matrix is not square, not symmetric, or not positive definite
|
|
26
|
+
*
|
|
27
|
+
* @example
|
|
28
|
+
* ```ts
|
|
29
|
+
* // Symmetric positive definite matrix
|
|
30
|
+
* const A = [[4, 2], [2, 3]];
|
|
31
|
+
* const L = MatrixCholesky(A);
|
|
32
|
+
* // L = [[2, 0], [1, √2]] ≈ [[2, 0], [1, 1.414]]
|
|
33
|
+
*
|
|
34
|
+
* // Verify: L × L^T should equal A
|
|
35
|
+
* const LT = MatrixTranspose(L);
|
|
36
|
+
* const reconstructed = MatrixMultiply(L, LT);
|
|
37
|
+
* // reconstructed ≈ [[4, 2], [2, 3]]
|
|
38
|
+
* ```
|
|
39
|
+
*
|
|
40
|
+
* @complexity Time: O(n³/3), Space: O(n²) - About 2x faster than general LU decomposition
|
|
41
|
+
* @see {@link MatrixLU} For general square matrices that may not be positive definite
|
|
42
|
+
*/
|
|
43
|
+
export function MatrixCholesky(matrix) {
|
|
44
|
+
AssertMatrix(matrix, { square: true });
|
|
45
|
+
const [n] = MatrixSize(matrix);
|
|
46
|
+
const L = MatrixCreate(n, n);
|
|
47
|
+
// Perform Cholesky decomposition using the Cholesky-Banachiewicz algorithm
|
|
48
|
+
for (let i = 0; i < n; i++) {
|
|
49
|
+
const lRowI = L[i];
|
|
50
|
+
const matrixRowI = matrix[i];
|
|
51
|
+
AssertMatrixRow(lRowI);
|
|
52
|
+
AssertMatrixRow(matrixRowI);
|
|
53
|
+
for (let j = 0; j <= i; j++) {
|
|
54
|
+
const lRowJ = L[j];
|
|
55
|
+
AssertMatrixRow(lRowJ);
|
|
56
|
+
if (i === j) {
|
|
57
|
+
// Compute diagonal elements: L[i,i] = √(A[i,i] - Σ(k=0 to i-1) L[i,k]²)
|
|
58
|
+
let sum = 0;
|
|
59
|
+
// Sum of squares of elements in row i before column j
|
|
60
|
+
for (let k = 0; k < j; k++) {
|
|
61
|
+
const lVal = lRowI[k];
|
|
62
|
+
AssertMatrixValue(lVal, { rowIndex: i, columnIndex: k });
|
|
63
|
+
sum += lVal * lVal;
|
|
64
|
+
}
|
|
65
|
+
const matrixVal = matrixRowI[j];
|
|
66
|
+
AssertMatrixValue(matrixVal, { rowIndex: i, columnIndex: j });
|
|
67
|
+
const diagonal = matrixVal - sum;
|
|
68
|
+
// Check positive definiteness - diagonal must be positive
|
|
69
|
+
if (diagonal <= 0) {
|
|
70
|
+
throw new Error(`Matrix is not positive definite at element [${i},${j}]`);
|
|
71
|
+
}
|
|
72
|
+
lRowI[j] = Math.sqrt(diagonal);
|
|
73
|
+
}
|
|
74
|
+
else {
|
|
75
|
+
// Compute off-diagonal elements: L[i,j] = (A[i,j] - Σ(k=0 to j-1) L[i,k]*L[j,k]) / L[j,j]
|
|
76
|
+
let sum = 0;
|
|
77
|
+
// Sum of products of corresponding elements in rows i and j
|
|
78
|
+
for (let k = 0; k < j; k++) {
|
|
79
|
+
const lIVal = lRowI[k];
|
|
80
|
+
const lJVal = lRowJ[k];
|
|
81
|
+
AssertMatrixValue(lIVal, { rowIndex: i, columnIndex: k });
|
|
82
|
+
AssertMatrixValue(lJVal, { rowIndex: j, columnIndex: k });
|
|
83
|
+
sum += lIVal * lJVal;
|
|
84
|
+
}
|
|
85
|
+
const matrixVal = matrixRowI[j];
|
|
86
|
+
const lJDiag = lRowJ[j];
|
|
87
|
+
AssertMatrixValue(matrixVal, { rowIndex: i, columnIndex: j });
|
|
88
|
+
AssertMatrixValue(lJDiag, { rowIndex: j, columnIndex: j });
|
|
89
|
+
// Check for numerical stability - diagonal element should not be too small
|
|
90
|
+
if (Math.abs(lJDiag) < MATRIX_NUMERICAL_TOLERANCE) {
|
|
91
|
+
throw new Error(`Zero diagonal element at [${j},${j}] - matrix not positive definite`);
|
|
92
|
+
}
|
|
93
|
+
lRowI[j] = (matrixVal - sum) / lJDiag;
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
return L;
|
|
98
|
+
}
|
|
99
|
+
/**
|
|
100
|
+
* Performs eigenvalue decomposition for square matrices to find A × v = λ × v.
|
|
101
|
+
*
|
|
102
|
+
* Eigenvalue decomposition (also called spectral decomposition) finds the eigenvalues λ
|
|
103
|
+
* and corresponding eigenvectors v of a square matrix A. This fundamental decomposition
|
|
104
|
+
* reveals the principal directions and scaling factors of the linear transformation
|
|
105
|
+
* represented by the matrix.
|
|
106
|
+
*
|
|
107
|
+
* **Mathematical Background:**
|
|
108
|
+
* - Eigenvalues λ are scalars such that A × v = λ × v for some non-zero vector v
|
|
109
|
+
* - Eigenvectors v are the directions that remain unchanged (up to scaling) under A
|
|
110
|
+
* - The characteristic polynomial det(A - λI) = 0 gives the eigenvalues
|
|
111
|
+
* - Applications include stability analysis, principal component analysis, and vibration modes
|
|
112
|
+
*
|
|
113
|
+
* **Implementation Notes:**
|
|
114
|
+
* - Uses direct analytical computation for 1×1 and 2×2 matrices
|
|
115
|
+
* - Uses QR iteration for larger matrices (simplified implementation)
|
|
116
|
+
* - Currently supports only real eigenvalues (complex eigenvalues throw an error)
|
|
117
|
+
*
|
|
118
|
+
* @param matrix - The square matrix to decompose
|
|
119
|
+
* @returns Object containing eigenvalues and eigenvectors
|
|
120
|
+
* @throws {Error} If matrix is not square, contains invalid values, or has complex eigenvalues
|
|
121
|
+
*
|
|
122
|
+
* @example
|
|
123
|
+
* ```ts
|
|
124
|
+
* // Simple 2x2 matrix
|
|
125
|
+
* const A = [[3, 1], [0, 2]];
|
|
126
|
+
* const { eigenvalues, eigenvectors } = MatrixEigen(A);
|
|
127
|
+
* // eigenvalues: [3, 2]
|
|
128
|
+
* // eigenvectors: matrix where each column corresponds to an eigenvalue
|
|
129
|
+
*
|
|
130
|
+
* // Verify eigenvalue equation: A × v = λ × v
|
|
131
|
+
* const v = Matrix_GetColumn(eigenvectors, 0); // First eigenvector
|
|
132
|
+
* const Av = MatrixMultiplyVector(A, v);
|
|
133
|
+
* const lambdaV = Matrix_ScaleVector(v, eigenvalues[0]);
|
|
134
|
+
* // Av should approximately equal lambdaV
|
|
135
|
+
* ```
|
|
136
|
+
*
|
|
137
|
+
* @complexity O(n³) time for an n×n matrix
|
|
138
|
+
* @see {@link MatrixEigenQRIteration} For the iterative algorithm used for larger matrices
|
|
139
|
+
*/
|
|
140
|
+
export function MatrixEigen(matrix) {
|
|
141
|
+
AssertMatrix(matrix, { square: true });
|
|
142
|
+
const [n] = MatrixSize(matrix);
|
|
143
|
+
// For small matrices, use direct analytical computation for efficiency and accuracy
|
|
144
|
+
if (n === 1) {
|
|
145
|
+
AssertMatrix1(matrix);
|
|
146
|
+
// For 1×1 matrix, the single element is the eigenvalue, eigenvector is [1]
|
|
147
|
+
const [[value]] = matrix;
|
|
148
|
+
AssertNumber(value, {}, { message: 'Matrix[0,0] Not a Number' });
|
|
149
|
+
return {
|
|
150
|
+
eigenvalues: [value],
|
|
151
|
+
eigenvectors: [[1]],
|
|
152
|
+
};
|
|
153
|
+
}
|
|
154
|
+
if (n === 2) {
|
|
155
|
+
AssertMatrix2(matrix);
|
|
156
|
+
// Direct computation for 2×2 matrices using the characteristic polynomial
|
|
157
|
+
const [[a, b], [c, d]] = matrix;
|
|
158
|
+
// Characteristic polynomial: λ² - (a+d)λ + (ad-bc) = 0
|
|
159
|
+
// Using quadratic formula: λ = (trace ± √(trace² - 4×det)) / 2
|
|
160
|
+
const trace = a + d;
|
|
161
|
+
const det = (a * d) - (b * c);
|
|
162
|
+
const discriminant = (trace * trace) - (4 * det);
|
|
163
|
+
// Check for complex eigenvalues
|
|
164
|
+
if (discriminant < 0) {
|
|
165
|
+
throw new Error('Complex eigenvalues not supported in this implementation');
|
|
166
|
+
}
|
|
167
|
+
const sqrtDisc = Math.sqrt(discriminant);
|
|
168
|
+
const lambda1 = (trace + sqrtDisc) / 2;
|
|
169
|
+
const lambda2 = (trace - sqrtDisc) / 2;
|
|
170
|
+
// Compute eigenvectors by solving (A - λI)v = 0
|
|
171
|
+
const eigenvectors = MatrixCreate(2, 2);
|
|
172
|
+
// For each eigenvalue, find the corresponding eigenvector
|
|
173
|
+
if (Math.abs(b) > MATRIX_NUMERICAL_TOLERANCE && Math.abs(c) < MATRIX_NUMERICAL_TOLERANCE) {
|
|
174
|
+
// Upper-triangular or b ≠ 0, c ≈ 0
|
|
175
|
+
// First eigenvector: [1, 0] (for lambda1 = a)
|
|
176
|
+
// Second eigenvector: [1, (lambda2 - a)/b]
|
|
177
|
+
const [eigenvectorsRow0, eigenvectorsRow1] = eigenvectors;
|
|
178
|
+
AssertMatrixRow(eigenvectorsRow0);
|
|
179
|
+
AssertMatrixRow(eigenvectorsRow1);
|
|
180
|
+
eigenvectorsRow0[0] = 1;
|
|
181
|
+
eigenvectorsRow1[0] = 0;
|
|
182
|
+
eigenvectorsRow0[1] = 1;
|
|
183
|
+
eigenvectorsRow1[1] = (lambda2 - a) / b;
|
|
184
|
+
}
|
|
185
|
+
else if (Math.abs(c) > MATRIX_NUMERICAL_TOLERANCE) {
|
|
186
|
+
// Use the first row to find eigenvectors when c ≠ 0
|
|
187
|
+
const [eigenvectorsRow0, eigenvectorsRow1] = eigenvectors;
|
|
188
|
+
AssertMatrixRow(eigenvectorsRow0);
|
|
189
|
+
AssertMatrixRow(eigenvectorsRow1);
|
|
190
|
+
eigenvectorsRow0[0] = lambda1 - d;
|
|
191
|
+
eigenvectorsRow1[0] = c;
|
|
192
|
+
eigenvectorsRow0[1] = lambda2 - d;
|
|
193
|
+
eigenvectorsRow1[1] = c;
|
|
194
|
+
}
|
|
195
|
+
else {
|
|
196
|
+
// Diagonal matrix case - eigenvectors are standard basis vectors
|
|
197
|
+
const [eigenvectorsRow0, eigenvectorsRow1] = eigenvectors;
|
|
198
|
+
AssertMatrixRow(eigenvectorsRow0);
|
|
199
|
+
AssertMatrixRow(eigenvectorsRow1);
|
|
200
|
+
eigenvectorsRow0[0] = 1;
|
|
201
|
+
eigenvectorsRow1[0] = 0;
|
|
202
|
+
eigenvectorsRow0[1] = 0;
|
|
203
|
+
eigenvectorsRow1[1] = 1;
|
|
204
|
+
}
|
|
205
|
+
// Normalize eigenvectors to unit length for numerical stability
|
|
206
|
+
for (let j = 0; j < 2; j++) {
|
|
207
|
+
let norm = 0;
|
|
208
|
+
// Calculate the norm (length) of the eigenvector
|
|
209
|
+
for (let i = 0; i < 2; i++) {
|
|
210
|
+
const eigenvectorsRowI = eigenvectors[i];
|
|
211
|
+
AssertMatrixRow(eigenvectorsRowI);
|
|
212
|
+
const val = eigenvectorsRowI[j];
|
|
213
|
+
AssertNumber(val, {}, { message: `Eigenvector[${i},${j}] Not a Number` });
|
|
214
|
+
norm += val * val;
|
|
215
|
+
}
|
|
216
|
+
norm = Math.sqrt(norm);
|
|
217
|
+
// Normalize if the norm is significant
|
|
218
|
+
if (norm > MATRIX_NUMERICAL_TOLERANCE) {
|
|
219
|
+
for (let i = 0; i < 2; i++) {
|
|
220
|
+
const eigenvectorsRowI = eigenvectors[i];
|
|
221
|
+
AssertMatrixRow(eigenvectorsRowI);
|
|
222
|
+
const val = eigenvectorsRowI[j];
|
|
223
|
+
AssertMatrixValue(val, { rowIndex: i, columnIndex: j });
|
|
224
|
+
eigenvectorsRowI[j] = val / norm;
|
|
225
|
+
}
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
return {
|
|
229
|
+
eigenvalues: [lambda1, lambda2],
|
|
230
|
+
eigenvectors,
|
|
231
|
+
};
|
|
232
|
+
}
|
|
233
|
+
// For larger matrices, use the QR iteration algorithm
|
|
234
|
+
return MatrixEigenQRIteration(matrix);
|
|
235
|
+
}
|
|
236
|
+
/**
|
|
237
|
+
* Simplified QR iteration algorithm for eigenvalue computation of larger matrices.
|
|
238
|
+
*
|
|
239
|
+
* The QR algorithm is an iterative method for computing eigenvalues. It repeatedly
|
|
240
|
+
* performs QR decomposition and matrix multiplication until convergence. This is
|
|
241
|
+
* a simplified educational implementation without optimizations like Hessenberg
|
|
242
|
+
* reduction or deflation techniques used in production libraries.
|
|
243
|
+
*
|
|
244
|
+
* **Algorithm Overview:**
|
|
245
|
+
* 1. Start with A₀ = A
|
|
246
|
+
* 2. For each iteration k: decompose Aₖ = QₖRₖ
|
|
247
|
+
* 3. Set Aₖ₊₁ = RₖQₖ
|
|
248
|
+
* 4. Repeat until convergence (off-diagonal elements become small)
|
|
249
|
+
* 5. Eigenvalues are the diagonal elements of the final matrix
|
|
250
|
+
*
|
|
251
|
+
* @private
|
|
252
|
+
* @param matrix - Square matrix to compute eigenvalues for
|
|
253
|
+
* @param iterations - Maximum number of QR iterations (default: 50)
|
|
254
|
+
* @returns Object containing eigenvalues and approximated eigenvectors
|
|
255
|
+
*
|
|
256
|
+
* @complexity O(n³) per iteration, typically converges in O(n) iterations
|
|
257
|
+
* @see {@link MatrixQR} For the QR decomposition used in each iteration
|
|
258
|
+
*/
|
|
259
|
+
export function MatrixEigenQRIteration(matrix, iterations = 50) {
|
|
260
|
+
const [n] = MatrixSize(matrix);
|
|
261
|
+
// Copy matrix for iteration to avoid modifying the original
|
|
262
|
+
let A = MatrixClone(matrix);
|
|
263
|
+
let qTotal = MatrixIdentity(n); // Accumulate transformations for eigenvectors
|
|
264
|
+
for (let iter = 0; iter < iterations; iter++) {
|
|
265
|
+
// Try/catch to allow fallback for rank-deficient matrices
|
|
266
|
+
let Q;
|
|
267
|
+
let R;
|
|
268
|
+
try {
|
|
269
|
+
({ Q, R } = MatrixQR(A));
|
|
270
|
+
}
|
|
271
|
+
catch (err) {
|
|
272
|
+
// If QR fails due to linear dependence, fill Q with orthonormal basis and R with zeros
|
|
273
|
+
AssertInstanceOf(err, Error, { message: 'Unexpected error in QR iteration' });
|
|
274
|
+
if (err instanceof Error && typeof err.message === 'string' && err.message.includes('linearly dependent')) {
|
|
275
|
+
Q = MatrixGramSchmidt(A);
|
|
276
|
+
R = MatrixCreate(n, n); // All zeros
|
|
277
|
+
}
|
|
278
|
+
else {
|
|
279
|
+
throw err;
|
|
280
|
+
}
|
|
281
|
+
}
|
|
282
|
+
A = MatrixMultiply(R, Q);
|
|
283
|
+
qTotal = MatrixMultiply(qTotal, Q);
|
|
284
|
+
// Check for convergence - examine off-diagonal elements
|
|
285
|
+
let converged = true;
|
|
286
|
+
for (let i = 0; i < n - 1; i++) {
|
|
287
|
+
const aRowI = A[i];
|
|
288
|
+
AssertMatrixRow(aRowI);
|
|
289
|
+
for (let j = i + 1; j < n; j++) {
|
|
290
|
+
const aVal = aRowI[j];
|
|
291
|
+
AssertMatrixValue(aVal, { rowIndex: i, columnIndex: j });
|
|
292
|
+
if (Math.abs(aVal) > EIGEN_CONVERGENCE_TOLERANCE) {
|
|
293
|
+
converged = false;
|
|
294
|
+
break;
|
|
295
|
+
}
|
|
296
|
+
}
|
|
297
|
+
if (!converged)
|
|
298
|
+
break;
|
|
299
|
+
}
|
|
300
|
+
if (converged)
|
|
301
|
+
break;
|
|
302
|
+
}
|
|
303
|
+
// Extract eigenvalues from the diagonal of the final matrix
|
|
304
|
+
const eigenvalues = [];
|
|
305
|
+
for (let i = 0; i < n; i++) {
|
|
306
|
+
const aRowI = A[i];
|
|
307
|
+
AssertMatrixRow(aRowI);
|
|
308
|
+
const eigenvalue = aRowI[i];
|
|
309
|
+
AssertMatrixValue(eigenvalue, { rowIndex: i, columnIndex: i });
|
|
310
|
+
eigenvalues.push(eigenvalue);
|
|
311
|
+
}
|
|
312
|
+
return {
|
|
313
|
+
eigenvalues,
|
|
314
|
+
eigenvectors: qTotal, // Accumulated orthogonal transformations give eigenvectors
|
|
315
|
+
};
|
|
316
|
+
}
|
|
317
|
+
/**
|
|
318
|
+
* Performs LU decomposition of a square matrix A = L × U using Doolittle's method.
|
|
319
|
+
*
|
|
320
|
+
* LU decomposition factors a square matrix into the product of a lower triangular matrix L
|
|
321
|
+
* and an upper triangular matrix U. This decomposition is fundamental for solving linear
|
|
322
|
+
* systems, computing determinants, and matrix inversion.
|
|
323
|
+
*
|
|
324
|
+
* **Mathematical Background:**
|
|
325
|
+
* - L is lower triangular with 1's on the diagonal (unit lower triangular)
|
|
326
|
+
* - U is upper triangular containing the pivot elements
|
|
327
|
+
* - The decomposition exists if all leading principal minors are non-zero
|
|
328
|
+
* - Gaussian elimination with partial pivoting would be more stable, but this
|
|
329
|
+
* implementation uses Doolittle's method without pivoting for simplicity
|
|
330
|
+
*
|
|
331
|
+
* **Applications:**
|
|
332
|
+
* - Solving linear systems: Ax = b becomes LUx = b, solve Ly = b then Ux = y
|
|
333
|
+
* - Computing determinant: det(A) = det(L) × det(U) = det(U) = ∏ᵢ U[i,i]
|
|
334
|
+
* - Matrix inversion: A⁻¹ = U⁻¹L⁻¹
|
|
335
|
+
*
|
|
336
|
+
* @param matrix - Square matrix to decompose (must be non-singular and not require pivoting)
|
|
337
|
+
* @returns Object containing L (lower triangular) and U (upper triangular) matrices
|
|
338
|
+
* @throws {Error} If matrix is not square, singular, or contains invalid values
|
|
339
|
+
*
|
|
340
|
+
* @note This implementation does not use partial pivoting (row swapping). It will fail for matrices with zero-valued or near-zero leading minors even if the matrix is otherwise invertible (e.g., [[0,1],[1,0]]). For general matrices, use MatrixInverse instead.
|
|
341
|
+
*
|
|
342
|
+
* @example
|
|
343
|
+
* ```ts
|
|
344
|
+
* const A = [[2, 1], [1, 1]];
|
|
345
|
+
* const { L, U } = MatrixLU(A);
|
|
346
|
+
* // L = [[1, 0], [0.5, 1]], U = [[2, 1], [0, 0.5]]
|
|
347
|
+
*
|
|
348
|
+
* // Verify: L × U should equal A
|
|
349
|
+
* const product = MatrixMultiply(L, U);
|
|
350
|
+
* // product ≈ [[2, 1], [1, 1]]
|
|
351
|
+
*
|
|
352
|
+
* // Solve Ax = b using LU decomposition
|
|
353
|
+
* const b = [3, 2];
|
|
354
|
+
* // First solve Ly = b, then Ux = y
|
|
355
|
+
* ```
|
|
356
|
+
*
|
|
357
|
+
* @complexity Time: O(n³/3), Space: O(n²)
|
|
358
|
+
* @see {@link MatrixCholesky} For symmetric positive definite matrices (more efficient)
|
|
359
|
+
*/
|
|
360
|
+
export function MatrixLU(matrix) {
|
|
361
|
+
AssertMatrix(matrix, { square: true });
|
|
362
|
+
const [n] = MatrixSize(matrix);
|
|
363
|
+
const L = MatrixCreate(n, n);
|
|
364
|
+
const U = MatrixCreate(n, n);
|
|
365
|
+
// Initialize L's diagonal with 1's (unit lower triangular)
|
|
366
|
+
for (let i = 0; i < n; i++) {
|
|
367
|
+
const lRow = L[i];
|
|
368
|
+
AssertMatrixRow(lRow);
|
|
369
|
+
lRow[i] = 1;
|
|
370
|
+
}
|
|
371
|
+
// Perform Doolittle's LU decomposition
|
|
372
|
+
for (let i = 0; i < n; i++) {
|
|
373
|
+
// Compute upper triangular matrix U (row by row)
|
|
374
|
+
for (let j = i; j < n; j++) {
|
|
375
|
+
let sum = 0;
|
|
376
|
+
// Subtract the sum of L[i,k] * U[k,j] for k < i
|
|
377
|
+
for (let k = 0; k < i; k++) {
|
|
378
|
+
const lRow = L[i];
|
|
379
|
+
const uRowK = U[k];
|
|
380
|
+
AssertMatrixRow(lRow);
|
|
381
|
+
AssertMatrixRow(uRowK);
|
|
382
|
+
const lVal = lRow[k];
|
|
383
|
+
AssertMatrixValue(lVal, { rowIndex: i, columnIndex: k });
|
|
384
|
+
const uVal = uRowK[j];
|
|
385
|
+
AssertMatrixValue(uVal, { rowIndex: k, columnIndex: j });
|
|
386
|
+
sum += lVal * uVal;
|
|
387
|
+
}
|
|
388
|
+
const mRow = matrix[i];
|
|
389
|
+
const uRow = U[i];
|
|
390
|
+
AssertMatrixRow(mRow);
|
|
391
|
+
AssertMatrixRow(uRow);
|
|
392
|
+
const mVal = mRow[j];
|
|
393
|
+
AssertMatrixValue(mVal, { rowIndex: i, columnIndex: j });
|
|
394
|
+
uRow[j] = mVal - sum; // U[i,j] = A[i,j] - sum
|
|
395
|
+
}
|
|
396
|
+
// Check for zero pivot after computing the diagonal element
|
|
397
|
+
const uRow = U[i];
|
|
398
|
+
AssertMatrixRow(uRow);
|
|
399
|
+
const uVal = uRow[i]; // Pivot element U[i,i]
|
|
400
|
+
AssertMatrixValue(uVal, { rowIndex: i, columnIndex: i });
|
|
401
|
+
// Fix: Should throw if pivot is too close to zero (singular matrix)
|
|
402
|
+
if (Math.abs(uVal) < MATRIX_NUMERICAL_TOLERANCE) {
|
|
403
|
+
throw new MatrixError('Matrix is singular (zero pivot element)');
|
|
404
|
+
}
|
|
405
|
+
// Compute lower triangular matrix L (column by column)
|
|
406
|
+
for (let j = i + 1; j < n; j++) {
|
|
407
|
+
let sum = 0;
|
|
408
|
+
// Subtract the sum of L[j,k] * U[k,i] for k < i
|
|
409
|
+
for (let k = 0; k < i; k++) {
|
|
410
|
+
const lRowJ = L[j];
|
|
411
|
+
const uRowK = U[k];
|
|
412
|
+
AssertMatrixRow(lRowJ);
|
|
413
|
+
AssertMatrixRow(uRowK);
|
|
414
|
+
const lVal = lRowJ[k];
|
|
415
|
+
AssertMatrixValue(lVal, { rowIndex: j, columnIndex: k });
|
|
416
|
+
const uVal = uRowK[i];
|
|
417
|
+
AssertMatrixValue(uVal, { rowIndex: k, columnIndex: i });
|
|
418
|
+
sum += lVal * uVal;
|
|
419
|
+
}
|
|
420
|
+
const uRow = U[i];
|
|
421
|
+
AssertMatrixRow(uRow);
|
|
422
|
+
const uVal = uRow[i]; // Pivot element U[i,i]
|
|
423
|
+
AssertMatrixValue(uVal, { rowIndex: i, columnIndex: i });
|
|
424
|
+
// Check for zero pivot (singular matrix)
|
|
425
|
+
if (Math.abs(uVal) < MATRIX_NUMERICAL_TOLERANCE) {
|
|
426
|
+
throw new MatrixError('Matrix is singular (zero pivot element)');
|
|
427
|
+
}
|
|
428
|
+
const mRowJ = matrix[j];
|
|
429
|
+
const lRowJ = L[j];
|
|
430
|
+
AssertMatrixRow(mRowJ);
|
|
431
|
+
AssertMatrixRow(lRowJ);
|
|
432
|
+
const mVal = mRowJ[i];
|
|
433
|
+
AssertMatrixValue(mVal, { rowIndex: j, columnIndex: i });
|
|
434
|
+
lRowJ[i] = (mVal - sum) / uVal; // L[j,i] = (A[j,i] - sum) / U[i,i]
|
|
435
|
+
}
|
|
436
|
+
}
|
|
437
|
+
return { L, U };
|
|
438
|
+
}
|
|
439
|
+
/**
|
|
440
|
+
* Performs QR decomposition A = Q × R using Modified Gram-Schmidt orthogonalization.
|
|
441
|
+
*
|
|
442
|
+
* QR decomposition factors a matrix into the product of an orthogonal matrix Q and
|
|
443
|
+
* an upper triangular matrix R. This decomposition is fundamental for solving
|
|
444
|
+
* overdetermined linear systems, least squares problems, and eigenvalue computations.
|
|
445
|
+
*
|
|
446
|
+
* **Mathematical Background:**
|
|
447
|
+
* - Q is orthogonal: Q^T × Q = I (columns are orthonormal vectors)
|
|
448
|
+
* - R is upper triangular with non-negative diagonal elements
|
|
449
|
+
* - The decomposition always exists for matrices with linearly independent columns
|
|
450
|
+
* - Uses Modified Gram-Schmidt for better numerical stability than Classical Gram-Schmidt
|
|
451
|
+
*
|
|
452
|
+
* **Applications:**
|
|
453
|
+
* - Solving overdetermined systems Ax = b: x = R⁻¹Q^T b
|
|
454
|
+
* - Least squares solutions: minimize ||Ax - b||²
|
|
455
|
+
* - QR algorithm for eigenvalue computation
|
|
456
|
+
* - Orthogonal basis construction from linearly independent vectors
|
|
457
|
+
*
|
|
458
|
+
* @param matrix - Matrix to decompose (m×n where m ≥ n, must have full column rank)
|
|
459
|
+
* @returns Object containing orthogonal Q and upper triangular R matrices
|
|
460
|
+
* @throws {Error} If matrix has more columns than rows or columns are linearly dependent
|
|
461
|
+
*
|
|
462
|
+
* @example
|
|
463
|
+
* ```ts
|
|
464
|
+
* const A = [[1, 1], [1, 0], [0, 1]]; // 3×2 matrix
|
|
465
|
+
* const { Q, R } = MatrixQR(A);
|
|
466
|
+
* // Q: 3×2 orthogonal matrix with Q^T × Q = I₂
|
|
467
|
+
* // R: 2×2 upper triangular matrix
|
|
468
|
+
*
|
|
469
|
+
* // Verify: Q × R should equal A
|
|
470
|
+
* const reconstructed = MatrixMultiply(Q, R);
|
|
471
|
+
* // reconstructed ≈ A
|
|
472
|
+
*
|
|
473
|
+
* // Check orthogonality: Q^T × Q should be identity
|
|
474
|
+
* const QT = MatrixTranspose(Q);
|
|
475
|
+
* const identity = MatrixMultiply(QT, Q);
|
|
476
|
+
* ```
|
|
477
|
+
*
|
|
478
|
+
* @complexity Time: O(mn²), Space: O(mn) where m ≥ n
|
|
479
|
+
* @see {@link MatrixGramSchmidt} {@link MatrixLU} {@link MatrixEigenQRIteration}
|
|
480
|
+
*/
|
|
481
|
+
export function MatrixQR(matrix, allowDependentColumns = false) {
|
|
482
|
+
AssertMatrix(matrix);
|
|
483
|
+
const [m, n] = MatrixSize(matrix);
|
|
484
|
+
// Verify that the matrix has at least as many rows as columns
|
|
485
|
+
if (m < n) {
|
|
486
|
+
throw new Error('QR decomposition requires matrix to have at least as many rows as columns');
|
|
487
|
+
}
|
|
488
|
+
const Q = MatrixClone(matrix);
|
|
489
|
+
const R = MatrixCreate(n, n);
|
|
490
|
+
// Modified Gram-Schmidt orthogonalization process
|
|
491
|
+
for (let k = 0; k < n; k++) {
|
|
492
|
+
let norm = 0;
|
|
493
|
+
for (let i = 0; i < m; i++) {
|
|
494
|
+
const qRow = Q[i];
|
|
495
|
+
if (!qRow)
|
|
496
|
+
continue;
|
|
497
|
+
AssertMatrixRow(qRow);
|
|
498
|
+
const qVal = qRow[k];
|
|
499
|
+
AssertMatrixValue(qVal, { rowIndex: i, columnIndex: k });
|
|
500
|
+
norm += qVal * qVal;
|
|
501
|
+
}
|
|
502
|
+
norm = Math.sqrt(norm);
|
|
503
|
+
if (norm < MATRIX_NUMERICAL_TOLERANCE) {
|
|
504
|
+
if (!allowDependentColumns) {
|
|
505
|
+
throw new Error(`Column ${k} is linearly dependent on previous columns`);
|
|
506
|
+
}
|
|
507
|
+
// Fill Q[:,k] with an orthonormal vector not in the span of previous columns
|
|
508
|
+
const candidate = Array(m).fill(0);
|
|
509
|
+
candidate[k % m] = 1;
|
|
510
|
+
for (let j = 0; j < k; j++) {
|
|
511
|
+
let dot = 0;
|
|
512
|
+
for (let i = 0; i < m; i++) {
|
|
513
|
+
const qRow = Q[i];
|
|
514
|
+
AssertMatrixRow(qRow);
|
|
515
|
+
const qVal = qRow[j];
|
|
516
|
+
AssertMatrixValue(qVal, { rowIndex: i, columnIndex: j });
|
|
517
|
+
dot += qVal * candidate[i];
|
|
518
|
+
}
|
|
519
|
+
for (let i = 0; i < m; i++) {
|
|
520
|
+
const qRow = Q[i];
|
|
521
|
+
AssertMatrixRow(qRow);
|
|
522
|
+
const qVal = qRow[j];
|
|
523
|
+
AssertMatrixValue(qVal, { rowIndex: i, columnIndex: j });
|
|
524
|
+
const candidateVal = candidate[i];
|
|
525
|
+
AssertNumber(candidateVal, {}, { message: `candidate[${i}] Not a Number` });
|
|
526
|
+
candidate[i] = candidateVal - (dot * qVal);
|
|
527
|
+
}
|
|
528
|
+
}
|
|
529
|
+
const candNorm = Math.sqrt(candidate.reduce((sum, v) => sum + (v * v), 0));
|
|
530
|
+
if (candNorm > MATRIX_NUMERICAL_TOLERANCE) {
|
|
531
|
+
for (let i = 0; i < m; i++) {
|
|
532
|
+
const qRow = Q[i];
|
|
533
|
+
if (!Array.isArray(qRow))
|
|
534
|
+
throw new Error(`Internal error: Q[${i}] is not an array`);
|
|
535
|
+
qRow[k] = candidate[i] / candNorm;
|
|
536
|
+
}
|
|
537
|
+
}
|
|
538
|
+
else {
|
|
539
|
+
for (let i = 0; i < m; i++) {
|
|
540
|
+
const qRow = Q[i];
|
|
541
|
+
if (!Array.isArray(qRow))
|
|
542
|
+
throw new Error(`Internal error: Q[${i}] is not an array`);
|
|
543
|
+
qRow[k] = 0;
|
|
544
|
+
}
|
|
545
|
+
}
|
|
546
|
+
const rRow = R[k];
|
|
547
|
+
AssertMatrixRow(rRow);
|
|
548
|
+
for (let j = 0; j < n; j++) {
|
|
549
|
+
rRow[j] = 0;
|
|
550
|
+
}
|
|
551
|
+
continue;
|
|
552
|
+
}
|
|
553
|
+
const rRow = R[k];
|
|
554
|
+
AssertMatrixRow(rRow);
|
|
555
|
+
rRow[k] = norm;
|
|
556
|
+
for (let i = 0; i < m; i++) {
|
|
557
|
+
const qRow = Q[i];
|
|
558
|
+
if (!qRow)
|
|
559
|
+
continue;
|
|
560
|
+
AssertMatrixRow(qRow);
|
|
561
|
+
const qVal = qRow[k];
|
|
562
|
+
AssertMatrixValue(qVal, { rowIndex: i, columnIndex: k });
|
|
563
|
+
qRow[k] = qVal / norm;
|
|
564
|
+
}
|
|
565
|
+
for (let j = k + 1; j < n; j++) {
|
|
566
|
+
let dot = 0;
|
|
567
|
+
for (let i = 0; i < m; i++) {
|
|
568
|
+
const qRow = Q[i];
|
|
569
|
+
if (!qRow)
|
|
570
|
+
continue;
|
|
571
|
+
AssertMatrixRow(qRow);
|
|
572
|
+
const qValK = qRow[k];
|
|
573
|
+
const qValJ = qRow[j];
|
|
574
|
+
AssertMatrixValue(qValK, { rowIndex: i, columnIndex: k });
|
|
575
|
+
AssertMatrixValue(qValJ, { rowIndex: i, columnIndex: j });
|
|
576
|
+
dot += qValK * qValJ;
|
|
577
|
+
}
|
|
578
|
+
rRow[j] = dot;
|
|
579
|
+
for (let i = 0; i < m; i++) {
|
|
580
|
+
const qRow = Q[i];
|
|
581
|
+
if (!qRow)
|
|
582
|
+
continue;
|
|
583
|
+
AssertMatrixRow(qRow);
|
|
584
|
+
const qValK = qRow[k];
|
|
585
|
+
const qValJ = qRow[j];
|
|
586
|
+
AssertMatrixValue(qValK, { rowIndex: i, columnIndex: k });
|
|
587
|
+
AssertMatrixValue(qValJ, { rowIndex: i, columnIndex: j });
|
|
588
|
+
qRow[j] = qValJ - (dot * qValK);
|
|
589
|
+
}
|
|
590
|
+
}
|
|
591
|
+
}
|
|
592
|
+
return { Q, R };
|
|
593
|
+
}
|
|
594
|
+
/**
|
|
595
|
+
* Performs Singular Value Decomposition (SVD) of a matrix A = U × Σ × V^T.
|
|
596
|
+
*
|
|
597
|
+
* SVD is a generalization of eigendecomposition that works for any matrix (not just square).
|
|
598
|
+
* It decomposes a matrix into three components that reveal fundamental properties about
|
|
599
|
+
* the linear transformation, including its rank, range, and null space.
|
|
600
|
+
*
|
|
601
|
+
* **Mathematical Background:**
|
|
602
|
+
* - U contains the left singular vectors (orthonormal columns)
|
|
603
|
+
* - Σ is diagonal with singular values σᵢ ≥ 0 in descending order
|
|
604
|
+
* - V^T contains the right singular vectors (orthonormal rows)
|
|
605
|
+
* - Singular values are the square roots of eigenvalues of A^T A
|
|
606
|
+
* - The rank of A equals the number of non-zero singular values
|
|
607
|
+
*
|
|
608
|
+
* **Applications:**
|
|
609
|
+
* - Principal Component Analysis (PCA): V gives principal directions
|
|
610
|
+
* - Matrix approximation: truncated SVD for dimensionality reduction
|
|
611
|
+
* - Pseudo-inverse computation: A⁺ = V Σ⁺ U^T
|
|
612
|
+
* - Least squares solutions for overdetermined systems
|
|
613
|
+
* - Image compression and noise reduction
|
|
614
|
+
* - Numerical rank determination
|
|
615
|
+
*
|
|
616
|
+
* **Implementation Notes:**
|
|
617
|
+
* - Uses eigendecomposition of A^T A to find V and singular values
|
|
618
|
+
* - Computes U = A V Σ⁻¹ for the left singular vectors
|
|
619
|
+
* - Applies Gram-Schmidt to ensure orthogonality of U
|
|
620
|
+
* - Handles edge cases for 1×1, single row, and single column matrices
|
|
621
|
+
*
|
|
622
|
+
* @param matrix - Matrix to decompose (any m×n matrix)
|
|
623
|
+
* @returns Object containing U, S (singular values), and VT matrices
|
|
624
|
+
* @throws {Error} If matrix contains invalid values (NaN, Infinity)
|
|
625
|
+
*
|
|
626
|
+
* @example
|
|
627
|
+
* ```ts
|
|
628
|
+
* const A = [[1, 2], [3, 4], [5, 6]]; // 3×2 matrix
|
|
629
|
+
* const { U, S, VT } = MatrixSVD(A);
|
|
630
|
+
* // U: 3×2 matrix with orthonormal columns
|
|
631
|
+
* // S: [σ₁, σ₂] singular values in descending order
|
|
632
|
+
* // VT: 2×2 orthogonal matrix (V transposed)
|
|
633
|
+
*
|
|
634
|
+
* // Verify reconstruction: U × diag(S) × VT ≈ A
|
|
635
|
+
* const Sigma = Matrix_Diagonal(S);
|
|
636
|
+
* const reconstructed = MatrixMultiply(MatrixMultiply(U, Sigma), VT);
|
|
637
|
+
*
|
|
638
|
+
* // Matrix rank from singular values (count non-zero values)
|
|
639
|
+
* const rank = S.filter(s => s > 1e-10).length;
|
|
640
|
+
*
|
|
641
|
+
* // Condition number for stability analysis
|
|
642
|
+
* const conditionNumber = S[0] / S[S.length - 1];
|
|
643
|
+
* ```
|
|
644
|
+
*
|
|
645
|
+
* @complexity Time: O(min(m²n, mn²)), Space: O(m² + n²)
|
|
646
|
+
* @see {@link MatrixQR} {@link MatrixEigenQRIteration} {@link Matrix_PseudoInverse}
|
|
647
|
+
*/
|
|
648
|
+
export function MatrixSVD(matrix) {
|
|
649
|
+
AssertMatrix(matrix);
|
|
650
|
+
const [m, n] = MatrixSize(matrix);
|
|
651
|
+
// Handle trivial case: 1x1 matrix
|
|
652
|
+
if (m === 1 && n === 1) {
|
|
653
|
+
const value = matrix[0]?.[0] ?? 0;
|
|
654
|
+
return {
|
|
655
|
+
U: [[1]],
|
|
656
|
+
S: [Math.abs(value)],
|
|
657
|
+
VT: [[value >= 0 ? 1 : -1]],
|
|
658
|
+
};
|
|
659
|
+
}
|
|
660
|
+
// Handle single row or single column
|
|
661
|
+
if (m === 1 || n === 1) {
|
|
662
|
+
const vec = m === 1 ? matrix[0] ?? [0] : matrix.map((row) => row?.[0] ?? 0);
|
|
663
|
+
const norm = Math.sqrt(vec.reduce((sum, v) => sum + ((v ?? 0) * (v ?? 0)), 0));
|
|
664
|
+
const U = m === 1 ? [[1]] : matrix.map((row) => [(row?.[0] ?? 0) / (norm > 0 ? norm : 1)]);
|
|
665
|
+
const VT = n === 1 ? [[1]] : [(matrix[0] ?? []).map((v) => (v ?? 0) / (norm > 0 ? norm : 1))];
|
|
666
|
+
return {
|
|
667
|
+
U,
|
|
668
|
+
S: [norm],
|
|
669
|
+
VT,
|
|
670
|
+
};
|
|
671
|
+
}
|
|
672
|
+
// General case: m x n matrix
|
|
673
|
+
// Step 1: Compute A^T * A (n x n)
|
|
674
|
+
const AT = MatrixTranspose(matrix);
|
|
675
|
+
const ATA = MatrixMultiply(AT, matrix);
|
|
676
|
+
// Step 2: Eigendecomposition of A^T A
|
|
677
|
+
const { eigenvalues, eigenvectors } = MatrixEigen(ATA);
|
|
678
|
+
const S = Array.isArray(eigenvalues) ? eigenvalues.map((ev) => Math.sqrt(Math.max(ev, 0))) : [];
|
|
679
|
+
const indices = [];
|
|
680
|
+
if (Array.isArray(S)) {
|
|
681
|
+
for (let i = 0; i < S.length; i++) {
|
|
682
|
+
indices.push(i);
|
|
683
|
+
}
|
|
684
|
+
indices.sort((a, b) => (S[b] ?? 0) - (S[a] ?? 0));
|
|
685
|
+
}
|
|
686
|
+
// Defensive: filter out undefined indices and values
|
|
687
|
+
const validIndices = [];
|
|
688
|
+
for (const idx of indices) {
|
|
689
|
+
if (typeof idx === 'number' && isFinite(idx) && idx >= 0 && idx < eigenvalues.length) {
|
|
690
|
+
validIndices.push(idx);
|
|
691
|
+
}
|
|
692
|
+
}
|
|
693
|
+
const sSorted = [];
|
|
694
|
+
const V = [];
|
|
695
|
+
for (const i of validIndices) {
|
|
696
|
+
sSorted.push(typeof S[i] === 'number' ? S[i] : 0);
|
|
697
|
+
const vCol = [];
|
|
698
|
+
for (const r of eigenvectors) {
|
|
699
|
+
vCol.push(Array.isArray(r) && typeof r[i] === 'number' ? r[i] : 0);
|
|
700
|
+
}
|
|
701
|
+
V.push(vCol);
|
|
702
|
+
}
|
|
703
|
+
// vMat: n x n, columns are right singular vectors
|
|
704
|
+
const vMat = [];
|
|
705
|
+
const vColLength = V.length > 0 && Array.isArray(V[0]) ? V[0].length : 0;
|
|
706
|
+
for (let colIdx = 0; colIdx < vColLength; colIdx++) {
|
|
707
|
+
const col = [];
|
|
708
|
+
for (const row of V) {
|
|
709
|
+
const value = Array.isArray(row) && typeof row[colIdx] === 'number' ? row[colIdx] : 0;
|
|
710
|
+
col.push(typeof value === 'number' ? value : 0);
|
|
711
|
+
}
|
|
712
|
+
vMat.push(col);
|
|
713
|
+
}
|
|
714
|
+
// Step 3: Compute U = AVΣ⁻¹ (m x n)
|
|
715
|
+
const U = MatrixCreate(m, n);
|
|
716
|
+
for (let j = 0; j < n; j++) {
|
|
717
|
+
const sigma = sSorted[j];
|
|
718
|
+
if (sigma && sigma > MATRIX_NUMERICAL_TOLERANCE) {
|
|
719
|
+
// vj as a column vector (n x 1)
|
|
720
|
+
const vjCol = [];
|
|
721
|
+
for (const row of vMat) {
|
|
722
|
+
const value = Array.isArray(row) && typeof row[j] === 'number' ? row[j] : 0;
|
|
723
|
+
vjCol.push([typeof value === 'number' ? value : 0]);
|
|
724
|
+
}
|
|
725
|
+
const av = MatrixMultiply(matrix, vjCol); // m x 1
|
|
726
|
+
for (let i = 0; i < m; i++) {
|
|
727
|
+
const uRow = U[i];
|
|
728
|
+
const avRow = av[i];
|
|
729
|
+
if (Array.isArray(uRow) && Array.isArray(avRow))
|
|
730
|
+
uRow[j] = (typeof avRow[0] === 'number' ? avRow[0] : 0) / sigma;
|
|
731
|
+
}
|
|
732
|
+
}
|
|
733
|
+
else {
|
|
734
|
+
for (let i = 0; i < m; i++) {
|
|
735
|
+
const uRow = U[i];
|
|
736
|
+
if (Array.isArray(uRow))
|
|
737
|
+
uRow[j] = 0;
|
|
738
|
+
}
|
|
739
|
+
}
|
|
740
|
+
}
|
|
741
|
+
// Step 4: Orthonormalize U columns (Gram-Schmidt)
|
|
742
|
+
const uOrtho = MatrixGramSchmidt(U);
|
|
743
|
+
// Step 5: VT is vMat^T (n x n)
|
|
744
|
+
const VT = MatrixTranspose(vMat);
|
|
745
|
+
return {
|
|
746
|
+
U: uOrtho,
|
|
747
|
+
S: sSorted,
|
|
748
|
+
VT,
|
|
749
|
+
};
|
|
750
|
+
}
|
|
751
|
+
/**
|
|
752
|
+
* Solves the linear system Ax = b for the unknown vector x.
|
|
753
|
+
* Uses LU decomposition internally followed by forward and back substitution.
|
|
754
|
+
*
|
|
755
|
+
* Given an n×n coefficient matrix A and an n-element right-hand side vector b,
|
|
756
|
+
* finds x such that A × x = b.
|
|
757
|
+
*
|
|
758
|
+
* @param a - Square n×n coefficient matrix (must be non-singular)
|
|
759
|
+
* @param b - Right-hand side vector of length n
|
|
760
|
+
* @returns Solution vector x of length n satisfying Ax = b
|
|
761
|
+
* @throws {MatrixError} If A is not square, singular, or b has the wrong length
|
|
762
|
+
*
|
|
763
|
+
* @example
|
|
764
|
+
* // 2x + y = 8
|
|
765
|
+
* // 5x + 3y = 20
|
|
766
|
+
* MatrixSolve([[2, 1], [5, 3]], [8, 20]); // [4, 0]
|
|
767
|
+
*
|
|
768
|
+
* @example
|
|
769
|
+
* // Solve a 3×3 system
|
|
770
|
+
* const A = [[1, 2, -1], [2, 1, 1], [3, -1, 2]];
|
|
771
|
+
* const b = [4, 7, 2];
|
|
772
|
+
* MatrixSolve(A, b); // solution vector x
|
|
773
|
+
*/
|
|
774
|
+
export function MatrixSolve(a, b) {
|
|
775
|
+
AssertMatrix(a, { square: true });
|
|
776
|
+
const [n] = MatrixSize(a);
|
|
777
|
+
if (b.length !== n) {
|
|
778
|
+
throw new MatrixError(`Right-hand side vector length (${b.length}) must match matrix dimension (${n})`);
|
|
779
|
+
}
|
|
780
|
+
const { L, U } = MatrixLU(a);
|
|
781
|
+
// Forward substitution: solve Ly = b (L has 1s on its diagonal)
|
|
782
|
+
const y = new Array(n).fill(0);
|
|
783
|
+
for (let i = 0; i < n; i++) {
|
|
784
|
+
const lRow = L[i];
|
|
785
|
+
AssertMatrixRow(lRow);
|
|
786
|
+
let sum = 0;
|
|
787
|
+
for (let j = 0; j < i; j++) {
|
|
788
|
+
const lVal = lRow[j];
|
|
789
|
+
AssertMatrixValue(lVal);
|
|
790
|
+
sum += lVal * y[j];
|
|
791
|
+
}
|
|
792
|
+
const bi = b[i];
|
|
793
|
+
if (bi === undefined)
|
|
794
|
+
throw new MatrixError(`b[${i}] is undefined`);
|
|
795
|
+
y[i] = bi - sum;
|
|
796
|
+
}
|
|
797
|
+
// Back substitution: solve Ux = y
|
|
798
|
+
const x = new Array(n).fill(0);
|
|
799
|
+
for (let i = n - 1; i >= 0; i--) {
|
|
800
|
+
const uRow = U[i];
|
|
801
|
+
AssertMatrixRow(uRow);
|
|
802
|
+
let sum = 0;
|
|
803
|
+
for (let j = i + 1; j < n; j++) {
|
|
804
|
+
const uVal = uRow[j];
|
|
805
|
+
AssertMatrixValue(uVal);
|
|
806
|
+
sum += uVal * x[j];
|
|
807
|
+
}
|
|
808
|
+
const uDiag = uRow[i];
|
|
809
|
+
AssertMatrixValue(uDiag);
|
|
810
|
+
if (uDiag === 0)
|
|
811
|
+
throw new MatrixError('Matrix is singular — cannot solve the linear system');
|
|
812
|
+
x[i] = (y[i] - sum) / uDiag;
|
|
813
|
+
}
|
|
814
|
+
return x;
|
|
815
|
+
}
|
|
816
|
+
//# sourceMappingURL=decompositions.js.map
|