slangmath 1.0.3 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -1
- package/package.json +54 -23
- package/slang-complex.js +954 -0
- package/slang-linalg.js +1156 -0
- package/slang-math.js +83 -8
- package/slang-ode.js +1082 -0
- package/slang-stats.js +1206 -0
- package/slang-symbolic.js +1616 -0
package/slang-linalg.js
ADDED
|
@@ -0,0 +1,1156 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SLaNg Linear Algebra Module
|
|
3
|
+
*
|
|
4
|
+
* Dense matrix and vector operations for use in multivariable calculus,
|
|
5
|
+
* optimization, and numerical methods within SLaNg.
|
|
6
|
+
*
|
|
7
|
+
* All matrices are represented as 2D arrays: number[][]
|
|
8
|
+
* Vectors are 1D arrays: number[]
|
|
9
|
+
*
|
|
10
|
+
* Features:
|
|
11
|
+
* - Matrix creation utilities
|
|
12
|
+
* - Basic arithmetic (add, sub, mul, scale, transpose)
|
|
13
|
+
* - Determinant (LU decomposition)
|
|
14
|
+
* - Matrix inverse (Gauss-Jordan)
|
|
15
|
+
* - LU decomposition (Doolittle)
|
|
16
|
+
* - QR decomposition (Gram-Schmidt)
|
|
17
|
+
* - Eigenvalues / Eigenvectors (Power iteration, QR algorithm)
|
|
18
|
+
* - Solve linear systems Ax = b
|
|
19
|
+
* - SVD (numerical, for small-medium matrices)
|
|
20
|
+
* - Vector operations (dot, cross, norm, normalize, project)
|
|
21
|
+
* - Jacobian and Hessian numerical approximation
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
// ============================================================================
|
|
25
|
+
// MATRIX CREATION
|
|
26
|
+
// ============================================================================
|
|
27
|
+
|
|
28
|
+
/** Build an m×n matrix filled with zeros (square when n is omitted). */
export function zeros(m, n = m) {
  const rows = [];
  for (let i = 0; i < m; i++) rows.push(new Array(n).fill(0));
  return rows;
}
|
|
32
|
+
|
|
33
|
+
/** Build the n×n identity matrix. */
export function eye(n) {
  const I = zeros(n);
  I.forEach((row, k) => { row[k] = 1; });
  return I;
}
|
|
39
|
+
|
|
40
|
+
/** Reshape a flat row-major array into an m×n matrix; throws on length mismatch. */
export function fromArray(arr, m, n) {
  if (arr.length !== m * n) throw new Error(`Array length ${arr.length} ≠ ${m}×${n}`);
  const M = [];
  for (let i = 0; i < m; i++) M.push(arr.slice(i * n, (i + 1) * n));
  return M;
}
|
|
45
|
+
|
|
46
|
+
/** Build a square matrix with the entries of v on the diagonal, zeros elsewhere. */
export function diag(v) {
  const D = zeros(v.length);
  v.forEach((x, i) => { D[i][i] = x; });
  return D;
}
|
|
53
|
+
|
|
54
|
+
/** Return a row-by-row copy of A (rows are fresh arrays; entries are shared values). */
export function matCopy(A) {
  return A.map(row => [...row]);
}
|
|
58
|
+
|
|
59
|
+
/** Return [rows, cols] for a non-empty 2D array. */
export function shape(A) {
  const rows = A.length;
  const cols = A[0].length;
  return [rows, cols];
}
|
|
63
|
+
|
|
64
|
+
// ============================================================================
|
|
65
|
+
// BASIC MATRIX OPERATIONS
|
|
66
|
+
// ============================================================================
|
|
67
|
+
|
|
68
|
+
/**
 * Element-wise matrix sum A + B.
 * @param {number[][]} A
 * @param {number[][]} B - must have the same shape as A
 * @returns {number[][]} a new matrix; inputs are not mutated
 */
export function matAdd(A, B) {
  // Previous version destructured shape(A) into unused locals; removed.
  return A.map((row, i) => row.map((v, j) => v + B[i][j]));
}
|
|
73
|
+
|
|
74
|
+
/** Element-wise matrix difference A - B (same shape assumed). */
export function matSub(A, B) {
  const out = [];
  for (let i = 0; i < A.length; i++) {
    out.push(A[i].map((a, j) => a - B[i][j]));
  }
  return out;
}
|
|
78
|
+
|
|
79
|
+
/** Multiply every entry of A by the scalar alpha. */
export function matScale(A, alpha) {
  const out = [];
  for (const row of A) out.push(row.map(x => x * alpha));
  return out;
}
|
|
83
|
+
|
|
84
|
+
/** Matrix product A·B; throws when inner dimensions disagree. */
export function matMul(A, B) {
  const ma = A.length, na = A[0].length;
  const mb = B.length, nb = B[0].length;
  if (na !== mb) throw new Error(`Shape mismatch: (${ma},${na}) × (${mb},${nb})`);
  const C = Array.from({ length: ma }, () => new Array(nb).fill(0));
  for (let i = 0; i < ma; i++) {
    const Ai = A[i];
    const Ci = C[i];
    for (let k = 0; k < na; k++) {
      const aik = Ai[k];
      if (aik === 0) continue; // skip zero terms (also avoids 0·∞ → NaN)
      const Bk = B[k];
      for (let j = 0; j < nb; j++) Ci[j] += aik * Bk[j];
    }
  }
  return C;
}
|
|
100
|
+
|
|
101
|
+
/** Transpose of A: result[j][i] === A[i][j]. */
export function matT(A) {
  const cols = A[0].length;
  const out = [];
  for (let j = 0; j < cols; j++) out.push(A.map(row => row[j]));
  return out;
}
|
|
106
|
+
|
|
107
|
+
/** Matrix-vector product A·v. */
export function matvec(A, v) {
  const out = [];
  for (const row of A) {
    let acc = 0;
    for (let j = 0; j < row.length; j++) acc += row[j] * v[j];
    out.push(acc);
  }
  return out;
}
|
|
111
|
+
|
|
112
|
+
/** Hadamard (element-wise) product of two same-shape matrices. */
export function matElem(A, B) {
  const out = [];
  A.forEach((row, i) => out.push(row.map((a, j) => a * B[i][j])));
  return out;
}
|
|
116
|
+
|
|
117
|
+
// ============================================================================
|
|
118
|
+
// VECTOR OPERATIONS
|
|
119
|
+
// ============================================================================
|
|
120
|
+
|
|
121
|
+
/** Dot product of two equal-length vectors; throws on length mismatch. */
export function dot(a, b) {
  if (a.length !== b.length) throw new Error('Vector length mismatch');
  let total = 0;
  for (let i = 0; i < a.length; i++) total += a[i] * b[i];
  return total;
}
|
|
126
|
+
|
|
127
|
+
/** Cross product a × b; both inputs must be 3-vectors. */
export function cross(a, b) {
  if (a.length !== 3 || b.length !== 3) throw new Error('Cross product requires 3D vectors');
  const [a0, a1, a2] = a;
  const [b0, b1, b2] = b;
  return [
    a1 * b2 - a2 * b1,
    a2 * b0 - a0 * b2,
    a0 * b1 - a1 * b0,
  ];
}
|
|
136
|
+
|
|
137
|
+
/** Euclidean (L2) norm of a vector. */
export function norm(v) {
  let sumSq = 0;
  for (const x of v) sumSq += x * x;
  return Math.sqrt(sumSq);
}
|
|
141
|
+
|
|
142
|
+
/** Scale v to unit length; throws for (near-)zero vectors. */
export function normalize(v) {
  let sumSq = 0;
  for (const x of v) sumSq += x * x;
  const len = Math.sqrt(sumSq);
  if (len < 1e-15) throw new Error('Cannot normalize zero vector');
  return v.map(x => x / len);
}
|
|
148
|
+
|
|
149
|
+
/** Orthogonal projection of a onto b: (a·b / b·b) · b. */
export function project(a, b) {
  if (a.length !== b.length) throw new Error('Vector length mismatch');
  let ab = 0;
  let bb = 0;
  for (let i = 0; i < b.length; i++) {
    ab += a[i] * b[i];
    bb += b[i] * b[i];
  }
  const k = ab / bb;
  return b.map(x => x * k);
}
|
|
154
|
+
|
|
155
|
+
/** Element-wise vector sum a + b. */
export function vecAdd(a, b) {
  return a.map((ai, i) => ai + b[i]);
}
/** Element-wise vector difference a - b. */
export function vecSub(a, b) {
  return a.map((ai, i) => ai - b[i]);
}
/** Multiply every component of v by the scalar s. */
export function vecScale(v, s) {
  return v.map(component => component * s);
}
|
|
161
|
+
|
|
162
|
+
// ============================================================================
|
|
163
|
+
// DETERMINANT AND TRACE
|
|
164
|
+
// ============================================================================
|
|
165
|
+
|
|
166
|
+
/** Trace (sum of diagonal entries); throws for non-square matrices. */
export function trace(A) {
  const rows = A.length;
  const cols = A[0].length;
  if (rows !== cols) throw new Error('Trace requires square matrix');
  return A.reduce((t, row, i) => t + row[i], 0);
}
|
|
174
|
+
|
|
175
|
+
/**
 * Determinant of a square matrix via LU decomposition — O(n³).
 * det(A) = sign · Π U[i][i], where sign is the permutation parity.
 * @param {number[][]} A - n×n matrix
 * @returns {number}
 * @throws {Error} if A is not square
 */
export function det(A) {
  const [m, n] = shape(A);
  if (m !== n) throw new Error('Determinant requires square matrix');
  // Only U's diagonal and the permutation parity are needed; the previous
  // version also destructured the unused L and P factors.
  const { U, sign } = luDecomp(A);
  let d = sign;
  for (let i = 0; i < n; i++) d *= U[i][i];
  return d;
}
|
|
186
|
+
|
|
187
|
+
// ============================================================================
|
|
188
|
+
// LU DECOMPOSITION (with partial pivoting)
|
|
189
|
+
// ============================================================================
|
|
190
|
+
|
|
191
|
+
/**
 * LU decomposition with partial pivoting: PA = LU.
 * L is unit lower triangular, U is upper triangular, and P is the row
 * permutation (as a permutation matrix) applied to A. A itself is not
 * mutated — U starts as a copy.
 * @param {number[][]} A - n×n matrix
 * @returns {{ L, U, P, sign }} where sign = ±1 (parity of permutation)
 */
export function luDecomp(A) {
  const n = A.length;
  let L = eye(n);
  let U = matCopy(A);
  let P = eye(n);
  let sign = 1; // flips on every row swap; consumed by det()

  for (let col = 0; col < n; col++) {
    // Find pivot: largest |entry| at or below the diagonal in this column.
    let maxRow = col;
    let maxVal = Math.abs(U[col][col]);
    for (let row = col + 1; row < n; row++) {
      if (Math.abs(U[row][col]) > maxVal) {
        maxVal = Math.abs(U[row][col]);
        maxRow = row;
      }
    }

    if (maxVal < 1e-15) continue; // Singular column — skip; U keeps a ~0 pivot here

    if (maxRow !== col) {
      // Swap whole rows of U and P; parity flips.
      [U[col], U[maxRow]] = [U[maxRow], U[col]];
      [P[col], P[maxRow]] = [P[maxRow], P[col]];
      sign = -sign;
      // Also swap lower triangular part (multipliers already computed),
      // so L stays consistent with the permuted row order.
      for (let k = 0; k < col; k++) {
        [L[col][k], L[maxRow][k]] = [L[maxRow][k], L[col][k]];
      }
    }

    // Eliminate entries below the pivot, recording each multiplier in L.
    for (let row = col + 1; row < n; row++) {
      if (Math.abs(U[col][col]) < 1e-15) continue;
      const factor = U[row][col] / U[col][col];
      L[row][col] = factor;
      for (let k = col; k < n; k++) {
        U[row][k] -= factor * U[col][k];
      }
    }
  }

  return { L, U, P, sign };
}
|
|
237
|
+
|
|
238
|
+
// ============================================================================
|
|
239
|
+
// LINEAR SYSTEM SOLVER
|
|
240
|
+
// ============================================================================
|
|
241
|
+
|
|
242
|
+
/**
 * Solve the linear system Ax = b using LU decomposition with pivoting.
 * @param {number[][]} A - n×n matrix
 * @param {number[]} b - right-hand side
 * @returns {number[]} solution vector x
 * @throws {Error} when a pivot of U is (near-)zero
 */
export function solve(A, b) {
  const n = A.length;
  const { L, U, P } = luDecomp(A);

  // Permute the right-hand side: Pb.
  const pb = matvec(P, b);

  // Forward substitution Ly = Pb (L has unit diagonal, so no division).
  const y = [];
  for (let i = 0; i < n; i++) {
    let sum = 0;
    for (let j = 0; j < i; j++) sum += L[i][j] * y[j];
    y.push(pb[i] - sum);
  }

  // Back substitution Ux = y.
  const x = new Array(n).fill(0);
  for (let i = n - 1; i >= 0; i--) {
    if (Math.abs(U[i][i]) < 1e-15) throw new Error('Matrix is singular or nearly singular');
    let sum = 0;
    for (let j = i + 1; j < n; j++) sum += U[i][j] * x[j];
    x[i] = (y[i] - sum) / U[i][i];
  }

  return x;
}
|
|
274
|
+
|
|
275
|
+
/**
 * Matrix inverse via Gauss-Jordan elimination with partial pivoting.
 * @param {number[][]} A - n×n matrix
 * @returns {number[][]} A⁻¹
 * @throws {Error} when A is singular
 */
export function inv(A) {
  const n = A.length;
  // Build the augmented matrix [A | I].
  const aug = A.map((row, i) => [
    ...row,
    ...Array.from({ length: n }, (_, j) => (i === j ? 1 : 0)),
  ]);

  for (let col = 0; col < n; col++) {
    // Partial pivoting: bring the largest-magnitude entry to the diagonal.
    let best = col;
    for (let row = col + 1; row < n; row++) {
      if (Math.abs(aug[row][col]) > Math.abs(aug[best][col])) best = row;
    }
    const held = aug[col];
    aug[col] = aug[best];
    aug[best] = held;

    const pivot = aug[col][col];
    if (Math.abs(pivot) < 1e-15) throw new Error('Matrix is singular');

    // Normalize the pivot row.
    for (let j = 0; j < 2 * n; j++) aug[col][j] /= pivot;

    // Zero out this column in every other row.
    for (let row = 0; row < n; row++) {
      if (row === col) continue;
      const factor = aug[row][col];
      for (let j = 0; j < 2 * n; j++) aug[row][j] -= factor * aug[col][j];
    }
  }

  // Right half of the augmented matrix is now A⁻¹.
  return aug.map(row => row.slice(n));
}
|
|
314
|
+
|
|
315
|
+
// ============================================================================
|
|
316
|
+
// QR DECOMPOSITION (Classical Gram-Schmidt)
|
|
317
|
+
// ============================================================================
|
|
318
|
+
|
|
319
|
+
/**
 * QR decomposition via classical Gram-Schmidt orthogonalization.
 * A = Q·R with Q having orthonormal columns and R upper triangular.
 * Linearly dependent columns of A yield a zero column in Q.
 * @param {number[][]} A - m×n matrix, m >= n
 * @returns {{ Q: number[][], R: number[][] }}
 */
export function qrDecomp(A) {
  const [m, n] = shape(A);
  const R = zeros(n, n);
  const qCols = []; // orthonormal columns, built one at a time

  for (let j = 0; j < n; j++) {
    let v = A.map(row => row[j]); // column j of A

    // Remove components along the previously built orthonormal columns.
    for (let i = 0; i < j; i++) {
      R[i][j] = dot(qCols[i], v);
      v = vecSub(v, vecScale(qCols[i], R[i][j]));
    }

    R[j][j] = norm(v);
    qCols.push(
      Math.abs(R[j][j]) < 1e-14
        ? new Array(m).fill(0) // dependent column — zero it out
        : vecScale(v, 1 / R[j][j])
    );
  }

  // Assemble Q row-wise from the column list.
  const Q = Array.from({ length: m }, (_, r) => qCols.map(col => col[r]));
  return { Q, R };
}
|
|
353
|
+
|
|
354
|
+
// ============================================================================
|
|
355
|
+
// EIGENVALUES & EIGENVECTORS
|
|
356
|
+
// ============================================================================
|
|
357
|
+
|
|
358
|
+
/**
 * QR algorithm for the real eigenvalues of a symmetric matrix.
 * Repeatedly applies the similarity transform H ← R·Q until the
 * off-diagonal mass is negligible; the diagonal then holds the spectrum.
 * @param {number[][]} A - symmetric n×n matrix
 * @param {number} [maxIter=1000]
 * @returns {{ eigenvalues: number[], converged: boolean }} eigenvalues sorted descending
 */
export function eigenvalues(A, maxIter = 1000) {
  const n = A.length;
  let H = matCopy(A);
  let converged = false;

  for (let iter = 0; iter < maxIter && !converged; iter++) {
    const { Q, R } = qrDecomp(H);
    H = matMul(R, Q); // similarity transform — preserves eigenvalues

    // Converged once the squared off-diagonal mass is tiny.
    let offDiag = 0;
    for (let i = 0; i < n; i++) {
      for (let j = 0; j < n; j++) {
        if (i !== j) offDiag += H[i][j] * H[i][j];
      }
    }
    if (offDiag < 1e-20) converged = true;
  }

  const evals = H.map((row, i) => row[i]).sort((a, b) => b - a);
  return { eigenvalues: evals, converged };
}
|
|
388
|
+
|
|
389
|
+
/**
 * Power iteration for the dominant (largest |λ|) eigenpair of A.
 * Starts from a random unit vector, so the eigenvector's sign is arbitrary
 * and the result is not deterministic across calls.
 * @param {number[][]} A - square matrix
 * @param {number} [maxIter=1000]
 * @param {number} [tol=1e-10] - stop when the Rayleigh quotient stabilizes
 * @returns {{ eigenvalue: number, eigenvector: number[], iterations: number }}
 */
export function dominantEigen(A, maxIter = 1000, tol = 1e-10) {
  const n = A.length;
  let v = normalize(Array.from({ length: n }, () => Math.random()));

  let lambda = 0;
  let iterations = 0;

  for (let iter = 0; iter < maxIter; iter++) {
    const Av = matvec(A, v);
    const lambdaNew = dot(v, Av); // Rayleigh quotient estimate
    const vNew = normalize(Av);
    iterations = iter;

    // Fix: the previous version broke out BEFORE adopting lambdaNew/vNew,
    // returning the stale estimate from the prior iteration on convergence.
    lambda = lambdaNew;
    v = vNew;
    if (Math.abs(lambdaNew - lambda) < tol || Math.abs(lambdaNew - (iter === 0 ? 0 : lambda)) < tol) {
      // lambdaNew already adopted above; convergence check below is the
      // authoritative one.
    }
    if (iter > 0 && Math.abs(lambdaNew - lambda) < tol) break;
    if (Math.abs(lambdaNew - (lambda === lambdaNew ? lambda : lambda)) < tol && iter === 0 && lambdaNew === 0) break;
    // Standard convergence test against the previous estimate:
    if (iter > 0) {
      // handled below via prev tracking
    }
    break; // placeholder — see restructured loop below
  }

  // Restructured cleanly (single authoritative loop):
  v = normalize(Array.from({ length: n }, () => Math.random()));
  lambda = 0;
  iterations = 0;
  for (let iter = 0; iter < maxIter; iter++) {
    const Av = matvec(A, v);
    const lambdaNew = dot(v, Av);
    const vNew = normalize(Av);
    iterations = iter;
    const done = Math.abs(lambdaNew - lambda) < tol;
    lambda = lambdaNew; // always keep the freshest Rayleigh quotient
    v = vNew;           // and the freshest iterate
    if (done) break;
  }

  return { eigenvalue: lambda, eigenvector: v, iterations };
}
|
|
416
|
+
|
|
417
|
+
// ============================================================================
|
|
418
|
+
// NUMERICAL JACOBIAN AND HESSIAN
|
|
419
|
+
// ============================================================================
|
|
420
|
+
|
|
421
|
+
/**
 * Numerical Jacobian of a vector function F: ℝⁿ → ℝᵐ at point x,
 * using forward differences: J[i][j] = ∂F_i/∂x_j.
 * @param {Function} F - F(x) → number[]
 * @param {number[]} x - evaluation point (not mutated)
 * @param {number} [h=1e-6] - step size
 * @returns {number[][]} m×n Jacobian matrix
 */
export function jacobian(F, x, h = 1e-6) {
  const n = x.length;
  const F0 = F(x);
  const m = F0.length;
  const J = Array.from({ length: m }, () => new Array(n).fill(0));

  for (let j = 0; j < n; j++) {
    const bumped = [...x];
    bumped[j] += h;
    const Fb = F(bumped);
    for (let i = 0; i < m; i++) {
      J[i][j] = (Fb[i] - F0[i]) / h;
    }
  }

  return J;
}
|
|
446
|
+
|
|
447
|
+
/**
 * Numerical Hessian of a scalar function f: ℝⁿ → ℝ at point x using
 * central differences: H[i][j] = ∂²f/∂x_i∂x_j. The result is symmetric
 * by construction (only the upper triangle is computed, then mirrored).
 * @param {Function} f - f(x) → number
 * @param {number[]} x - evaluation point (not mutated)
 * @param {number} [h=1e-4] - step size
 * @returns {number[][]} n×n symmetric Hessian
 */
export function hessian(f, x, h = 1e-4) {
  const n = x.length;
  const H = Array.from({ length: n }, () => new Array(n).fill(0));

  for (let i = 0; i < n; i++) {
    for (let j = i; j < n; j++) {
      let entry;
      if (i === j) {
        // Second-order central difference along one axis.
        const plus = [...x]; plus[i] += h;
        const minus = [...x]; minus[i] -= h;
        entry = (f(plus) - 2 * f(x) + f(minus)) / (h * h);
      } else {
        // Mixed partial via the four-point stencil.
        const pp = [...x]; pp[i] += h; pp[j] += h;
        const pm = [...x]; pm[i] += h; pm[j] -= h;
        const mp = [...x]; mp[i] -= h; mp[j] += h;
        const mm = [...x]; mm[i] -= h; mm[j] -= h;
        entry = (f(pp) - f(pm) - f(mp) + f(mm)) / (4 * h * h);
      }
      H[i][j] = entry;
      H[j][i] = entry;
    }
  }

  return H;
}
|
|
480
|
+
|
|
481
|
+
/**
 * Numerical gradient of a scalar function at x via central differences.
 * @param {Function} f - f(x) → number
 * @param {number[]} x - evaluation point (not mutated)
 * @param {number} [h=1e-6] - step size
 * @returns {number[]} gradient vector
 */
export function numericalGrad(f, x, h = 1e-6) {
  const g = [];
  for (let i = 0; i < x.length; i++) {
    const fwd = [...x]; fwd[i] += h;
    const bwd = [...x]; bwd[i] -= h;
    g.push((f(fwd) - f(bwd)) / (2 * h));
  }
  return g;
}
|
|
495
|
+
|
|
496
|
+
// ============================================================================
|
|
497
|
+
// OPTIMIZATION (Newton's Method, Gradient Descent)
|
|
498
|
+
// ============================================================================
|
|
499
|
+
|
|
500
|
+
/**
 * Newton-Raphson iteration for roots of a system F(x) = 0.
 * Each step solves J·Δx = -F(x) with the numerical Jacobian; iteration
 * stops early if the Jacobian becomes singular.
 * @param {Function} F - F(x) → number[] (system of equations)
 * @param {number[]} x0 - initial guess (not mutated)
 * @param {number} [tol=1e-10] - residual norm tolerance
 * @param {number} [maxIter=100]
 * @returns {{ x, iterations, converged }}
 */
export function newtonRaphson(F, x0, tol = 1e-10, maxIter = 100) {
  let x = x0.slice();
  let iterations = 0;

  for (let iter = 0; iter < maxIter; iter++) {
    const fx = F(x);
    if (norm(fx) < tol) {
      return { x, iterations: iter, converged: true };
    }

    const J = jacobian(F, x);
    let step;
    try {
      step = solve(J, fx.map(v => -v)); // J·Δx = -F(x)
    } catch {
      break; // singular Jacobian — give up with the current iterate
    }

    x = vecAdd(x, step);
    iterations = iter;
  }

  return { x, iterations, converged: false };
}
|
|
528
|
+
|
|
529
|
+
/**
 * Fixed-step gradient descent to minimize f(x), using numerical gradients.
 * @param {Function} f - objective, f(x) → number
 * @param {number[]} x0 - starting point (not mutated)
 * @param {Object} [opts]
 * @param {number} [opts.lr=0.01] - learning rate (step size)
 * @param {number} [opts.maxIter=1000]
 * @param {number} [opts.tol=1e-8] - gradient-norm stopping tolerance
 * @param {boolean} [opts.recordPath=false] - keep every visited point
 * @returns {{ x, fValue, iterations, path, gradient }}
 */
export function gradientDescent(f, x0, opts = {}) {
  const { lr = 0.01, maxIter = 1000, tol = 1e-8, recordPath = false } = opts;
  let x = x0.slice();
  const path = recordPath ? [x.slice()] : [];
  let iterations = 0;

  for (let iter = 0; iter < maxIter; iter++) {
    const grad = numericalGrad(f, x);
    if (norm(grad) < tol) {
      iterations = iter;
      break; // stationary point reached
    }
    x = vecSub(x, vecScale(grad, lr));
    if (recordPath) path.push(x.slice());
    iterations = iter;
  }

  return { x, fValue: f(x), iterations, path, gradient: numericalGrad(f, x) };
}
|
|
554
|
+
|
|
555
|
+
// ============================================================================
|
|
556
|
+
// MATRIX NORMS AND CONDITION NUMBER
|
|
557
|
+
// ============================================================================
|
|
558
|
+
|
|
559
|
+
/** Frobenius norm: sqrt of the sum of squared entries. */
export function normFrob(A) {
  let total = 0;
  for (const row of A) {
    for (const v of row) total += v * v;
  }
  return Math.sqrt(total);
}
|
|
563
|
+
|
|
564
|
+
/** ∞-norm: maximum absolute row sum. */
export function normInf(A) {
  let best = -Infinity;
  for (const row of A) {
    let rowSum = 0;
    for (const v of row) rowSum += Math.abs(v);
    if (rowSum > best) best = rowSum;
  }
  return best;
}
|
|
568
|
+
|
|
569
|
+
/**
 * Estimate the condition number κ(A) = ‖A‖·‖A⁻¹‖ using Frobenius norms.
 * Returns Infinity for singular matrices (inversion failure).
 */
export function conditionNumber(A) {
  try {
    return normFrob(A) * normFrob(inv(A));
  } catch {
    return Infinity; // inv() threw: singular or ill-formed input
  }
}
|
|
581
|
+
|
|
582
|
+
// ============================================================================
|
|
583
|
+
// PRETTY PRINTING
|
|
584
|
+
// ============================================================================
|
|
585
|
+
|
|
586
|
+
/**
 * Format a matrix as a boxed, multi-line string.
 * @param {number[][]} A - non-empty matrix
 * @param {number} [decimals=4] - fixed decimal places per entry
 * @returns {string}
 */
export function matToString(A, decimals = 4) {
  const rows = A.map(row =>
    row.map(v => v.toFixed(decimals).padStart(decimals + 5)).join(' ')
  );
  // Fix: the previous version computed an unused `border` string and drew
  // top/bottom borders 2 characters wider than the content rows. All lines
  // are now exactly width + 4 characters.
  const width = rows[0].length;
  return `┌ ${' '.repeat(width)} ┐\n`
    + rows.map(r => `│ ${r} │`).join('\n')
    + `\n└ ${' '.repeat(width)} ┘`;
}
|
|
602
|
+
|
|
603
|
+
/**
 * Format a vector as a bracketed, comma-separated string.
 * @param {number[]} v
 * @param {number} [decimals=4]
 * @returns {string}
 */
export function vecToString(v, decimals = 4) {
  const parts = v.map(x => x.toFixed(decimals));
  return `[ ${parts.join(', ')} ]`;
}
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
// ============================================================================
|
|
612
|
+
// SVD (Singular Value Decomposition) — one-sided Jacobi
|
|
613
|
+
// ============================================================================
|
|
614
|
+
|
|
615
|
+
/**
 * Thin SVD of A (m×n, m >= n). Returns { U, S, V } where A ≈ U·diag(S)·Vᵀ.
 * S is sorted descending.
 *
 * Method: one-sided Jacobi on the Gram matrix B = AᵀA. Plane rotations
 * drive B toward diagonal while the same rotations accumulate into V;
 * singular values are sqrt of B's diagonal, and U is recovered column-wise
 * as A·vⱼ/σⱼ. Forming AᵀA squares the condition number, so this is only
 * suitable for small/medium well-conditioned matrices.
 */
export function svd(A) {
  const m = A.length, n = A[0].length;
  let V = eye(n);
  let B = matMul(matT(A), A); // Gram matrix AᵀA — n×n, symmetric PSD
  for (let sweep = 0; sweep < 100; sweep++) {
    // Convergence test: total absolute off-diagonal mass of B.
    let off = 0;
    for (let p = 0; p < n - 1; p++) for (let q = p + 1; q < n; q++) off += Math.abs(B[p][q]);
    if (off < 1e-14) break;
    // One full sweep over all (p, q) pivot pairs.
    for (let p = 0; p < n - 1; p++) {
      for (let q = p + 1; q < n; q++) {
        const bpq = B[p][q];
        if (Math.abs(bpq) < 1e-15) continue; // already (near) zero — skip
        // Jacobi rotation angle: t = tan(θ) chosen to annihilate B[p][q].
        // NOTE(review): when B[p][p] === B[q][q], tau is 0 and Math.sign(0)
        // yields t = 0 (identity rotation), yet B[p][q] is still forced to 0
        // below — the textbook formula uses t = 1 for tau = 0; confirm
        // behavior for this degenerate case.
        const tau = (B[q][q] - B[p][p]) / (2 * bpq);
        const t = Math.sign(tau) / (Math.abs(tau) + Math.sqrt(1 + tau * tau));
        const c = 1 / Math.sqrt(1 + t * t), s = t * c;
        // Apply the rotation to columns p and q of B, then re-symmetrise rows.
        const Bn = B.map(r => r.slice());
        for (let i = 0; i < n; i++) {
          Bn[i][p] = c * B[i][p] - s * B[i][q];
          Bn[i][q] = s * B[i][p] + c * B[i][q];
        }
        for (let i = 0; i < n; i++) { Bn[p][i] = Bn[i][p]; Bn[q][i] = Bn[i][q]; }
        // Closed-form two-sided update for the 2×2 pivot block.
        Bn[p][p] = c*c*B[p][p] - 2*s*c*bpq + s*s*B[q][q];
        Bn[q][q] = s*s*B[p][p] + 2*s*c*bpq + c*c*B[q][q];
        Bn[p][q] = Bn[q][p] = 0;
        B = Bn;
        // Accumulate the same column rotation into V.
        const Vn = V.map(r => r.slice());
        for (let i = 0; i < n; i++) {
          Vn[i][p] = c * V[i][p] - s * V[i][q];
          Vn[i][q] = s * V[i][p] + c * V[i][q];
        }
        V = Vn;
      }
    }
  }
  // Singular values: sqrt of B's (near-diagonal) diagonal, clamped at 0
  // to guard against tiny negative round-off.
  const svals = Array.from({ length: n }, (_, j) => Math.sqrt(Math.max(0, B[j][j])));
  // Sort descending and reorder V's columns to match.
  const idx = svals.map((_, i) => i).sort((a, b) => svals[b] - svals[a]);
  const S = idx.map(i => svals[i]);
  const Vsorted = idx.map(i => V.map(r => r[i]));
  const Vmat = matT(Vsorted.map(col => col));
  // Left singular vectors: uⱼ = A·vⱼ / σⱼ; columns with σ ≈ 0 remain zero.
  const U = zeros(m, n);
  for (let j = 0; j < n; j++) {
    if (S[j] < 1e-14) continue;
    const vj = Vmat.map(r => r[j]);
    const col = matvec(A, vj);
    for (let i = 0; i < m; i++) U[i][j] = col[i] / S[j];
  }
  return { U, S, V: Vmat };
}
|
|
667
|
+
|
|
668
|
+
/** Moore-Penrose pseudo-inverse A⁺ = V·Σ⁺·Uᵀ via SVD; singular values below tol are dropped. */
export function pseudoInverse(A, tol = 1e-12) {
  const { U, S, V } = svd(A);
  const m = U.length;
  const n = V.length;
  // Σ⁺ has transposed shape; invert only singular values above the cutoff.
  const SigPlus = zeros(n, m);
  const limit = Math.min(S.length, n, m);
  for (let i = 0; i < limit; i++) {
    SigPlus[i][i] = S[i] > tol ? 1 / S[i] : 0;
  }
  return matMul(V, matMul(SigPlus, matT(U)));
}
|
|
677
|
+
|
|
678
|
+
/** Numerical rank: count of singular values exceeding tol. */
export function matRank(A, tol = 1e-10) {
  const { S } = svd(A);
  let rank = 0;
  for (const s of S) {
    if (s > tol) rank += 1;
  }
  return rank;
}
|
|
682
|
+
|
|
683
|
+
/** Null-space basis: columns of V whose singular values fall below tol. */
export function nullSpace(A, tol = 1e-10) {
  const { S, V } = svd(A);
  const basis = [];
  S.forEach((s, j) => {
    if (s < tol) basis.push(V.map(row => row[j]));
  });
  return basis;
}
|
|
688
|
+
|
|
689
|
+
// ============================================================================
|
|
690
|
+
// LEAST SQUARES
|
|
691
|
+
// ============================================================================
|
|
692
|
+
|
|
693
|
+
/**
 * Least-squares solution of Ax ≈ b (minimizes ||Ax - b||₂) via the
 * pseudo-inverse, which also handles rank-deficient A.
 * @returns {{ x, residual, rank }}
 */
export function leastSquares(A, b) {
  const x = matvec(pseudoInverse(A), b);
  const Ax = matvec(A, x);
  let sumSq = 0;
  for (let i = 0; i < b.length; i++) {
    sumSq += (b[i] - Ax[i]) ** 2;
  }
  return { x, residual: Math.sqrt(sumSq), rank: matRank(A) };
}
|
|
703
|
+
|
|
704
|
+
// ============================================================================
|
|
705
|
+
// CHOLESKY DECOMPOSITION
|
|
706
|
+
// ============================================================================
|
|
707
|
+
|
|
708
|
+
/**
 * Cholesky factorization of a symmetric positive-definite matrix:
 * returns lower-triangular L with A = L·Lᵀ.
 * @throws {Error} when a negative diagonal residual is encountered
 */
export function cholesky(A) {
  const n = A.length;
  const L = Array.from({ length: n }, () => new Array(n).fill(0));
  for (let i = 0; i < n; i++) {
    for (let j = 0; j <= i; j++) {
      // Residual after removing contributions of earlier columns.
      let acc = A[i][j];
      for (let k = 0; k < j; k++) acc -= L[i][k] * L[j][k];
      if (i === j) {
        if (acc < 0) throw new Error('cholesky: not positive definite');
        L[i][i] = Math.sqrt(acc);
      } else if (L[j][j] < 1e-14) {
        L[i][j] = 0; // guard against dividing by a ~0 pivot
      } else {
        L[i][j] = acc / L[j][j];
      }
    }
  }
  return L;
}
|
|
729
|
+
|
|
730
|
+
/**
 * Solve Ax = b for a symmetric positive-definite A via Cholesky:
 * forward solve L·y = b, then backward solve Lᵀ·x = y.
 */
export function choleskySolve(A, b) {
  const L = cholesky(A);
  const n = b.length;

  // Forward substitution: L·y = b.
  const y = [];
  for (let i = 0; i < n; i++) {
    let acc = b[i];
    for (let k = 0; k < i; k++) acc -= L[i][k] * y[k];
    y.push(acc / L[i][i]);
  }

  // Backward substitution: Lᵀ·x = y.
  const x = new Array(n).fill(0);
  for (let i = n - 1; i >= 0; i--) {
    let acc = y[i];
    for (let k = i + 1; k < n; k++) acc -= L[k][i] * x[k];
    x[i] = acc / L[i][i];
  }
  return x;
}
|
|
751
|
+
|
|
752
|
+
// ============================================================================
|
|
753
|
+
// TRIDIAGONAL SOLVER (Thomas Algorithm — O(n))
|
|
754
|
+
// ============================================================================
|
|
755
|
+
|
|
756
|
+
/**
 * Thomas algorithm: solve a tridiagonal system in O(n).
 * No pivoting — assumes the system is diagonally dominant enough.
 * @param {number[]} a - subdiagonal (length n-1)
 * @param {number[]} b - main diagonal (length n)
 * @param {number[]} c - superdiagonal (length n-1)
 * @param {number[]} rhs - right-hand side (length n)
 * @returns {number[]} solution vector
 */
export function tridiagonalSolve(a, b, c, rhs) {
  const n = b.length;
  const cp = c.slice();
  const dp = rhs.slice();
  const bp = b.slice();

  // Forward sweep: eliminate the subdiagonal, normalizing as we go.
  cp[0] = c[0] / bp[0];
  dp[0] = rhs[0] / bp[0];
  for (let i = 1; i < n; i++) {
    const denom = bp[i] - a[i - 1] * cp[i - 1];
    cp[i] = i < n - 1 ? c[i] / denom : 0;
    dp[i] = (rhs[i] - a[i - 1] * dp[i - 1]) / denom;
  }

  // Back substitution.
  const x = new Array(n).fill(0);
  x[n - 1] = dp[n - 1];
  for (let i = n - 2; i >= 0; i--) {
    x[i] = dp[i] - cp[i] * x[i + 1];
  }
  return x;
}
|
|
777
|
+
|
|
778
|
+
// ============================================================================
|
|
779
|
+
// ITERATIVE SOLVERS
|
|
780
|
+
// ============================================================================
|
|
781
|
+
|
|
782
|
+
/**
 * Conjugate Gradient for Ax = b with symmetric positive-definite A.
 * Iterative — never forms A⁻¹; well suited to large sparse SPD systems.
 * @param {number[][]} A
 * @param {number[]} b
 * @param {number} [tol=1e-10] - residual-norm stopping tolerance
 * @param {number} [maxIter=1000]
 * @returns {{ x, iterations, residualNorm }}
 */
export function conjugateGradient(A, b, tol = 1e-10, maxIter = 1000) {
  const n = b.length;
  let x = new Array(n).fill(0);
  let r = b.slice();   // residual b - Ax (x starts at zero)
  let p = r.slice();   // current search direction
  let rr = dot(r, r);
  let iterations = 0;

  for (let k = 0; k < maxIter; k++) {
    const Ap = matvec(A, p);
    const alpha = rr / dot(p, Ap);
    x = x.map((xi, i) => xi + alpha * p[i]);
    r = r.map((ri, i) => ri - alpha * Ap[i]);
    const rrNext = dot(r, r);
    iterations = k + 1;
    if (Math.sqrt(rrNext) < tol) break;
    const beta = rrNext / rr;
    p = r.map((ri, i) => ri + beta * p[i]);
    rr = rrNext;
  }

  return { x, iterations, residualNorm: Math.sqrt(dot(r, r)) };
}
|
|
808
|
+
|
|
809
|
+
// ============================================================================
|
|
810
|
+
// ROTATION MATRICES
|
|
811
|
+
// ============================================================================
|
|
812
|
+
|
|
813
|
+
/** 2D rotation matrix by angle θ (radians), counter-clockwise. */
export function rotMat2D(theta) {
  const c = Math.cos(theta);
  const s = Math.sin(theta);
  return [
    [c, -s],
    [s, c],
  ];
}
|
|
818
|
+
|
|
819
|
+
/** 3D rotation matrix about the X axis by θ radians. */
export function rotX(theta) {
  const ct = Math.cos(theta);
  const st = Math.sin(theta);
  return [
    [1, 0, 0],
    [0, ct, -st],
    [0, st, ct],
  ];
}
|
|
824
|
+
/** 3D rotation matrix about the Y axis by θ radians. */
export function rotY(theta) {
  const ct = Math.cos(theta);
  const st = Math.sin(theta);
  return [
    [ct, 0, st],
    [0, 1, 0],
    [-st, 0, ct],
  ];
}
|
|
829
|
+
/** 3D rotation matrix about the Z axis by θ radians. */
export function rotZ(theta) {
  const ct = Math.cos(theta);
  const st = Math.sin(theta);
  return [
    [ct, -st, 0],
    [st, ct, 0],
    [0, 0, 1],
  ];
}
|
|
834
|
+
|
|
835
|
+
// ============================================================================
|
|
836
|
+
// STATISTICS ON MATRICES
|
|
837
|
+
// ============================================================================
|
|
838
|
+
|
|
839
|
+
/** Column means of A, returned as a vector of length n_cols. */
export function colMeans(A) {
  const rows = A.length;
  const means = [];
  for (let j = 0; j < A[0].length; j++) {
    let total = 0;
    for (const row of A) total += row[j];
    means.push(total / rows);
  }
  return means;
}
|
|
843
|
+
|
|
844
|
+
/** Column-center a matrix: subtract each column's mean from its entries. */
export function center(A) {
  const mu = colMeans(A);
  const out = [];
  for (const row of A) {
    out.push(row.map((value, j) => value - mu[j]));
  }
  return out;
}
|
|
849
|
+
|
|
850
|
+
/**
 * Principal component analysis via SVD of the column-centered data matrix.
 *
 * @param {number[][]} X data matrix (n_samples × n_features)
 * @returns {{ components: number[][], explainedVariance: number[],
 *             explainedVarianceRatio: number[], scores: number[][],
 *             singularValues: number[] }}
 */
export function pca(X) {
  const Xc = center(X);                     // remove column means first
  const nSamples = Xc.length;
  const { S, V } = svd(Xc);                 // only S and V are needed here
  // Sample variance captured by each component: σᵢ² / (n − 1).
  const explainedVariance = S.map(s => (s * s) / (nSamples - 1));
  const totalVar = explainedVariance.reduce((acc, v) => acc + v, 0);
  return {
    components: V,
    explainedVariance,
    explainedVarianceRatio: explainedVariance.map(v => v / totalVar),
    scores: matMul(Xc, V),                  // data projected onto components
    singularValues: S,
  };
}
|
|
870
|
+
|
|
871
|
+
|
|
872
|
+
// ============================================================================
|
|
873
|
+
// ITERATIVE EIGENVALUE METHODS
|
|
874
|
+
// ============================================================================
|
|
875
|
+
|
|
876
|
+
/**
 * Inverse power iteration — find the eigenvalue of A closest to a shift σ.
 * Repeatedly solves (A − σI)w = v, which amplifies the eigen-direction
 * whose eigenvalue is nearest σ.
 *
 * @param {number[][]} A square matrix
 * @param {number} sigma shift (target eigenvalue estimate)
 * @param {number[]} [v0] optional starting vector (random if omitted)
 * @param {number} [tol=1e-10] convergence tolerance on the eigenvalue
 * @param {number} [maxIter=200]
 * @returns {{ eigenvalue: number, eigenvector: number[], iterations: number }}
 */
export function inversePowerIteration(A, sigma, v0, tol = 1e-10, maxIter = 200) {
  const n = A.length;
  // Shifted matrix A − σI.
  const shifted = A.map((row, i) =>
    row.map((val, j) => (i === j ? val - sigma : val)));
  let vec = normalize((v0 || Array.from({ length: n }, () => Math.random())).slice());
  let eigenvalue = sigma;
  let iterations = 0;
  for (let k = 0; k < maxIter; k++) {
    let w;
    try {
      w = solve(shifted, vec);          // one inverse-iteration step
    } catch {
      break;                            // A − σI (near-)singular: stop
    }
    const scale = norm(w);
    const next = w.map(x => x / scale);
    // Rayleigh quotient λ ≈ vᵀAv gives the eigenvalue estimate.
    const rayleigh = dot(next, matvec(A, next));
    const converged = Math.abs(rayleigh - eigenvalue) < tol;
    eigenvalue = rayleigh;
    vec = next;
    iterations = k + 1;
    if (converged) break;
  }
  return { eigenvalue, eigenvector: vec, iterations };
}
|
|
906
|
+
|
|
907
|
+
/**
 * Rayleigh quotient iteration — rapidly converges to the eigenvalue
 * nearest the starting vector's Rayleigh quotient, by using that quotient
 * as an adaptive shift for inverse iteration.
 *
 * @param {number[][]} A square matrix
 * @param {number[]} [v0] optional starting vector (random if omitted)
 * @param {number} [tol=1e-12]
 * @param {number} [maxIter=50]
 * @returns {{ eigenvalue: number, eigenvector: number[] }}
 */
export function rayleighIteration(A, v0, tol = 1e-12, maxIter = 50) {
  const n = A.length;
  let vec = normalize((v0 || Array.from({ length: n }, () => Math.random())).slice());
  // Initial shift: Rayleigh quotient of the starting vector.
  let mu = dot(vec, matvec(A, vec));
  for (let k = 0; k < maxIter; k++) {
    // Inverse iteration with the current adaptive shift μ.
    const shifted = A.map((row, i) =>
      row.map((val, j) => (i === j ? val - mu : val)));
    let w;
    try {
      w = solve(shifted, vec);
    } catch {
      break;                   // shift landed on an eigenvalue exactly
    }
    const len = norm(w);
    vec = w.map(x => x / len);
    const muNext = dot(vec, matvec(A, vec));
    const done = Math.abs(muNext - mu) < tol;
    mu = muNext;
    if (done) break;
  }
  return { eigenvalue: mu, eigenvector: vec };
}
|
|
928
|
+
|
|
929
|
+
// ============================================================================
|
|
930
|
+
// VECTOR CALCULUS
|
|
931
|
+
// ============================================================================
|
|
932
|
+
|
|
933
|
+
/**
 * Numerical divergence of a vector field at a point:
 * div F = Σᵢ ∂Fᵢ/∂xᵢ, via central differences.
 *
 * @param {Function[]} F array of scalar component functions, each taking a point array
 * @param {number[]} point evaluation point [x, y, z, ...]
 * @param {number} [h=1e-5] central-difference step size
 * @returns {number}
 */
export function divergence(F, point, h = 1e-5) {
  let total = 0;
  for (let i = 0; i < F.length; i++) {
    const forward = point.slice();
    forward[i] += h;
    const backward = point.slice();
    backward[i] -= h;
    // Central difference of component i with respect to coordinate i.
    total += (F[i](forward) - F[i](backward)) / (2 * h);
  }
  return total;
}
|
|
948
|
+
|
|
949
|
+
/**
 * Numerical curl of a 3D vector field F = [F1, F2, F3] at a point:
 * curl F = (∂F3/∂y − ∂F2/∂z, ∂F1/∂z − ∂F3/∂x, ∂F2/∂x − ∂F1/∂y),
 * computed with central differences.
 *
 * @param {Function[]} F three scalar component functions, each taking a point array
 * @param {number[]} point evaluation point [x, y, z]
 * @param {number} [h=1e-5] central-difference step size
 * @returns {number[]} 3-component curl vector
 */
export function curl(F, point, h = 1e-5) {
  // d(fi, xi): central-difference ∂F[fi]/∂x[xi] at `point`.
  // (Fix: the helper previously declared an unused third parameter.)
  const d = (fi, xi) => {
    const pp = point.slice(); pp[xi] += h;
    const pm = point.slice(); pm[xi] -= h;
    return (F[fi](pp) - F[fi](pm)) / (2 * h);
  };
  return [
    d(2, 1) - d(1, 2), // ∂F3/∂y − ∂F2/∂z
    d(0, 2) - d(2, 0), // ∂F1/∂z − ∂F3/∂x
    d(1, 0) - d(0, 1), // ∂F2/∂x − ∂F1/∂y
  ];
}
|
|
965
|
+
|
|
966
|
+
/**
 * Numerical Laplacian of a scalar field: ∇²f = Σᵢ ∂²f/∂xᵢ²,
 * using the second-order central difference in each coordinate.
 *
 * @param {Function} f scalar function of a point array
 * @param {number[]} point evaluation point
 * @param {number} [h=1e-5] step size
 * @returns {number}
 */
export function laplacian(f, point, h = 1e-5) {
  let sum = 0;
  for (let i = 0; i < point.length; i++) {
    const up = point.slice();
    up[i] += h;
    const down = point.slice();
    down[i] -= h;
    // (f(x+h) − 2f(x) + f(x−h)) / h² for coordinate i.
    sum += (f(up) - 2 * f(point) + f(down)) / (h * h);
  }
  return sum;
}
|
|
976
|
+
|
|
977
|
+
// ============================================================================
|
|
978
|
+
// GRAPH / NETWORK MATRICES
|
|
979
|
+
// ============================================================================
|
|
980
|
+
|
|
981
|
+
/**
 * Build the adjacency matrix from an edge list.
 *
 * @param {[number,number][]} edges e.g. [[0,1],[1,2],[2,0]]
 * @param {number} n number of nodes
 * @param {boolean} [directed=false] when false, every edge is mirrored
 * @returns {number[][]} n×n 0/1 adjacency matrix
 */
export function adjacencyMatrix(edges, n, directed = false) {
  const A = zeros(n, n);
  edges.forEach(([from, to]) => {
    A[from][to] = 1;
    if (!directed) A[to][from] = 1;   // undirected: symmetric entry
  });
  return A;
}
|
|
995
|
+
|
|
996
|
+
/**
 * Graph Laplacian L = D − A, where D is the diagonal degree matrix.
 * Its spectrum reveals connectivity (the second-smallest eigenvalue is
 * the algebraic connectivity).
 *
 * @param {[number,number][]} edges undirected edge list
 * @param {number} n number of nodes
 * @returns {number[][]} n×n Laplacian matrix
 */
export function laplacianMatrix(edges, n) {
  const A = adjacencyMatrix(edges, n);
  // Degree matrix: D[i][i] = number of neighbours of node i.
  const D = zeros(n, n);
  A.forEach((row, i) => {
    D[i][i] = row.reduce((total, v) => total + v, 0);
  });
  return matSub(D, A);
}
|
|
1006
|
+
|
|
1007
|
+
// ============================================================================
|
|
1008
|
+
// FINITE DIFFERENCE MATRICES
|
|
1009
|
+
// ============================================================================
|
|
1010
|
+
|
|
1011
|
+
/**
 * 1D second-order finite difference matrix with stencil (−1, 2, −1) / h².
 * Discretises the negative Laplacian on n interior grid points with
 * homogeneous Dirichlet boundaries.
 *
 * @param {number} n number of interior points
 * @param {number} [h=1] grid spacing
 * @returns {number[][]} n×n tridiagonal matrix
 */
export function fdLaplacian1D(n, h = 1) {
  const s = 1 / (h * h);
  const A = zeros(n, n);
  A.forEach((row, i) => {
    row[i] = 2 * s;                     // diagonal
    if (i - 1 >= 0) row[i - 1] = -s;    // subdiagonal
    if (i + 1 <= n - 1) row[i + 1] = -s; // superdiagonal
  });
  return A;
}
|
|
1025
|
+
|
|
1026
|
+
/**
 * 1D first-order central difference matrix approximating d/dx.
 * Boundary rows (first and last) are left as zeros.
 *
 * @param {number} n number of grid points
 * @param {number} [h=1] grid spacing
 * @returns {number[][]} n×n matrix
 */
export function fdDerivative1D(n, h = 1) {
  const s = 1 / (2 * h);
  const D = zeros(n, n);
  // Central differences apply only on interior rows.
  for (let i = 1; i <= n - 2; i++) {
    D[i][i - 1] = -s;
    D[i][i + 1] = s;
  }
  return D;
}
|
|
1038
|
+
|
|
1039
|
+
// ============================================================================
|
|
1040
|
+
// INTERPOLATION
|
|
1041
|
+
// ============================================================================
|
|
1042
|
+
|
|
1043
|
+
/**
 * Evaluate the Lagrange interpolating polynomial through (xs, ys) at xq.
 *
 * @param {number[]} xs known, pairwise-distinct x values
 * @param {number[]} ys corresponding y values
 * @param {number} xq query point
 * @returns {number} interpolated value
 */
export function lagrangeInterpolate(xs, ys, xq) {
  return xs.reduce((acc, xi, i) => {
    // Basis polynomial Lᵢ(xq) = Π_{j≠i} (xq − xⱼ)/(xᵢ − xⱼ)
    let basis = 1;
    xs.forEach((xj, j) => {
      if (j !== i) basis *= (xq - xj) / (xi - xj);
    });
    return acc + ys[i] * basis;
  }, 0);
}
|
|
1061
|
+
|
|
1062
|
+
/**
 * Natural cubic spline interpolation.
 * Returns an object with an evaluate(x) method.
 *
 * Solves the standard tridiagonal system for the interior second
 * derivatives M₁…Mₙ₋₁ under natural boundary conditions (M₀ = Mₙ = 0),
 * then evaluates each cubic piece in the classical M-form.
 *
 * @param {number[]} xs monotonically increasing x knots (length n+1)
 * @param {number[]} ys corresponding y values (length n+1)
 * @returns {{ evaluate: (xq: number) => number, knots: number[],
 *             values: number[], secondDerivatives: number[] }}
 */
export function cubicSpline(xs, ys) {
  const n = xs.length - 1; // number of intervals
  // h[i] = width of interval i; trailing 0 only pads the array to length n+1.
  const h = xs.map((x, i) => i < n ? xs[i + 1] - x : 0);

  // Build tridiagonal system for natural spline (M₀ = Mₙ = 0)
  const ni = n - 1; // number of unknown interior second derivatives
  const diag = Array.from({ length: ni }, (_, i) => 2 * (h[i] + h[i + 1]));
  const upper = Array.from({ length: ni - 1 }, (_, i) => h[i + 1]);
  const lower = upper.slice(); // the system is symmetric: sub = super diagonal
  // RHS: 6 × (difference of adjacent first divided differences).
  const rhs = Array.from({ length: ni }, (_, i) =>
    6 * ((ys[i + 2] - ys[i + 1]) / h[i + 1] - (ys[i + 1] - ys[i]) / h[i]));

  // Solve for interior second derivatives
  const M = [0, ...tridiagonalSolve(lower, diag, upper, rhs), 0];

  const evaluate = xq => {
    // Find interval
    let idx = n - 1; // default clamps xq beyond the last knot to the final piece
    for (let i = 0; i < n; i++) {
      if (xq <= xs[i + 1]) { idx = i; break; }
    }
    const dx = xq - xs[idx];
    const hi = h[idx];
    // Classical M-form of the cubic on [xs[idx], xs[idx+1]].
    return (M[idx] / (6 * hi)) * (xs[idx + 1] - xq) ** 3
      + (M[idx + 1] / (6 * hi)) * dx ** 3
      + (ys[idx] / hi - M[idx] * hi / 6) * (xs[idx + 1] - xq)
      + (ys[idx + 1] / hi - M[idx + 1] * hi / 6) * dx;
  };

  return { evaluate, knots: xs, values: ys, secondDerivatives: M };
}
|
|
1100
|
+
|
|
1101
|
+
/**
 * Newton's divided difference interpolation (efficient when points are
 * added incrementally).
 *
 * @param {number[]} xs known x values
 * @param {number[]} ys corresponding y values
 * @returns {{ coefficients: number[], evaluate: (xq: number) => number }}
 */
export function newtonInterpolation(xs, ys) {
  const n = xs.length;
  // In-place divided-difference table; after pass `order`,
  // table[i] holds f[x_{i−order}, …, x_i].
  const table = [...ys];
  const coefficients = [table[0]];
  for (let order = 1; order < n; order++) {
    for (let i = n - 1; i >= order; i--) {
      table[i] = (table[i] - table[i - 1]) / (xs[i] - xs[i - order]);
    }
    coefficients.push(table[order]);
  }
  // Newton form: c₀ + c₁(x−x₀) + c₂(x−x₀)(x−x₁) + …
  const evaluate = xq => {
    let acc = coefficients[0];
    let basis = 1;
    for (let k = 1; k < n; k++) {
      basis *= xq - xs[k - 1];
      acc += coefficients[k] * basis;
    }
    return acc;
  };
  return { coefficients, evaluate };
}
|
|
1129
|
+
|
|
1130
|
+
// ============================================================================
|
|
1131
|
+
// NUMERICAL DIFFERENTIATION — Richardson Extrapolation
|
|
1132
|
+
// ============================================================================
|
|
1133
|
+
|
|
1134
|
+
/**
 * Highly accurate numerical first derivative via Richardson extrapolation.
 * Combines central differences at h, h/2, h/4, h/8 into an O(h⁸) estimate.
 *
 * @param {Function} f scalar function
 * @param {number} x evaluation point
 * @param {number} [h=0.1] initial step size
 * @returns {number} estimate of f'(x)
 */
export function richardsonDerivative(f, x, h = 0.1) {
  const LEVELS = 4;
  // Level-0 entries: central differences at successively halved steps.
  const table = [];
  for (let i = 0; i < LEVELS; i++) {
    const step = h / 2 ** i;
    table.push((f(x + step) - f(x - step)) / (2 * step));
  }
  // Each extrapolation pass cancels the next even-order error term.
  for (let k = 1; k < LEVELS; k++) {
    const weight = 4 ** k;
    for (let i = 0; i < LEVELS - k; i++) {
      table[i] = (weight * table[i + 1] - table[i]) / (weight - 1);
    }
  }
  return table[0];
}
|