pygeoinf 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygeoinf/__init__.py +6 -1
- pygeoinf/gaussian_measure.py +25 -36
- pygeoinf/linear_operators.py +957 -133
- pygeoinf/linear_solvers.py +39 -7
- pygeoinf/random_matrix.py +114 -0
- {pygeoinf-1.2.6.dist-info → pygeoinf-1.2.8.dist-info}/METADATA +8 -15
- {pygeoinf-1.2.6.dist-info → pygeoinf-1.2.8.dist-info}/RECORD +9 -9
- {pygeoinf-1.2.6.dist-info → pygeoinf-1.2.8.dist-info}/LICENSE +0 -0
- {pygeoinf-1.2.6.dist-info → pygeoinf-1.2.8.dist-info}/WHEEL +0 -0
pygeoinf/linear_operators.py
CHANGED
@@ -18,12 +18,18 @@ Key Classes
 """

 from __future__ import annotations
-from typing import Callable, List, Optional, Any, Union, Tuple, TYPE_CHECKING
+from typing import Callable, List, Optional, Any, Union, Tuple, TYPE_CHECKING, Dict
+
+from collections import defaultdict

 import numpy as np
+import scipy.sparse as sp
 from scipy.sparse.linalg import LinearOperator as ScipyLinOp
 from scipy.sparse import diags

+
+from joblib import Parallel, delayed
+
 # from .operators import Operator
 from .nonlinear_operators import NonLinearOperator

@@ -49,8 +55,8 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):

     This class represents a linear map `L(x) = Ax` and provides rich
     functionality for linear algebraic operations. It specializes
-    `NonLinearOperator`,
-    itself.
+    `NonLinearOperator`, with the derivative mapping taking the
+    required form (i.e., the derivative is just the operator itself).

     Key features include operator algebra (`@`, `+`, `*`), automatic
     derivation of adjoint (`.adjoint`) and dual (`.dual`) operators, and
@@ -67,7 +73,6 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         *,
         dual_mapping: Optional[Callable[[Any], Any]] = None,
         adjoint_mapping: Optional[Callable[[Any], Any]] = None,
-        thread_safe: bool = False,
         dual_base: Optional[LinearOperator] = None,
         adjoint_base: Optional[LinearOperator] = None,
     ) -> None:
@@ -80,9 +85,14 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
             mapping (callable): The function defining the linear mapping.
             dual_mapping (callable, optional): The action of the dual operator.
             adjoint_mapping (callable, optional): The action of the adjoint.
-            thread_safe (bool, optional): True if the mapping is thread-safe.
             dual_base (LinearOperator, optional): Internal use for duals.
            adjoint_base (LinearOperator, optional): Internal use for adjoints.
+
+        Notes:
+            If neither the dual nor the adjoint mapping is provided, they are
+            deduced internally using a correct but very inefficient method.
+            In general this functionality should not be relied on other than
+            for operators between low-dimensional spaces.
         """
         super().__init__(
             domain, codomain, self._mapping_impl, derivative=self._derivative_impl
@@ -90,7 +100,6 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         self._mapping = mapping
         self._dual_base: Optional[LinearOperator] = dual_base
         self._adjoint_base: Optional[LinearOperator] = adjoint_base
-        self._thread_safe: bool = thread_safe
         self.__adjoint_mapping: Callable[[Any], Any]
         self.__dual_mapping: Callable[[Any], Any]

@@ -249,83 +258,107 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
     def from_matrix(
         domain: HilbertSpace,
         codomain: HilbertSpace,
-        matrix: Union[np.ndarray, ScipyLinOp],
+        matrix: Union[np.ndarray, sp.sparray, ScipyLinOp],
         /,
         *,
         galerkin: bool = False,
-    ) ->
+    ) -> MatrixLinearOperator:
         """
-        Creates
+        Creates the most appropriate LinearOperator from a matrix representation.

-        This factory
-
+        This factory method acts as a dispatcher, inspecting the type of the
+        input matrix and returning the most specialized and optimized operator
+        subclass (e.g., Dense, Sparse, or DiagonalSparse). It also handles
+        matrix-free `scipy.sparse.linalg.LinearOperator` objects.

         Args:
             domain: The operator's domain space.
             codomain: The operator's codomain space.
-            matrix: The matrix representation (NumPy
-
-            galerkin: If `True`, the matrix is interpreted in
-                or Galerkin representation (`M_ij = <basis_j, A(basis_i)>`),
-                which maps a vector's components to the components of its
-                *dual*. This is crucial as it ensures a self-adjoint
-                operator is represented by a symmetric matrix. If `False`
-                (default), it's a standard component-to-component map.
+            matrix: The matrix representation (NumPy ndarray, SciPy sparray,
+                or SciPy LinearOperator).
+            galerkin: If `True`, the matrix is interpreted in Galerkin form.

         Returns:
-
+            An instance of the most appropriate MatrixLinearOperator subclass.
         """
+        # The order of these checks is important: from most specific to most general.

-
+        # 1. Check for the most specific diagonal-sparse format
+        if isinstance(matrix, sp.dia_array):
+            diagonals_tuple = (matrix.data, matrix.offsets)
+            return DiagonalSparseMatrixLinearOperator(
+                domain, codomain, diagonals_tuple, galerkin=galerkin
+            )

-
+        # 2. Check for any other modern sparse format
+        elif isinstance(matrix, sp.sparray):
+            return SparseMatrixLinearOperator(
+                domain, codomain, matrix, galerkin=galerkin
+            )

-
-
-
-
-
+        # 3. Check for a dense NumPy array
+        elif isinstance(matrix, np.ndarray):
+            return DenseMatrixLinearOperator(
+                domain, codomain, matrix, galerkin=galerkin
+            )

-
-
-
-
-                return domain.from_dual(xp)
+        # 4. Check for a matrix-free SciPy LinearOperator
+        elif isinstance(matrix, ScipyLinOp):
+            # This is matrix-free, so the general MatrixLinearOperator is the correct wrapper.
+            return MatrixLinearOperator(domain, codomain, matrix, galerkin=galerkin)

-
-
-
-
-
+        # 5. Handle legacy sparse matrix formats (optional but robust)
+        elif sp.issparse(matrix):
+            modern_array = sp.csr_array(matrix)
+            return SparseMatrixLinearOperator(
+                domain, codomain, modern_array, galerkin=galerkin
             )

+        # 6. Raise an error for unsupported types
         else:
-
-            def mapping(x: Any) -> Any:
-                cx = domain.to_components(x)
-                cy = matrix @ cx
-                return codomain.from_components(cy)
-
-            def dual_mapping(yp: Any) -> Any:
-                cyp = codomain.dual.to_components(yp)
-                cxp = matrix.T @ cyp
-                return domain.dual.from_components(cxp)
-
-            return LinearOperator(domain, codomain, mapping, dual_mapping=dual_mapping)
+            raise TypeError(f"Unsupported matrix type: {type(matrix)}")

     @staticmethod
     def self_adjoint_from_matrix(
-        domain: HilbertSpace,
-
-
+        domain: HilbertSpace,
+        matrix: Union[np.ndarray, sp.sparray, ScipyLinOp],
+    ) -> MatrixLinearOperator:
+        """
+        Creates the most appropriate self-adjoint LinearOperator from a matrix.

-
-
-            cyp = matrix @ cx
-            yp = domain.dual.from_components(cyp)
-            return domain.from_dual(yp)
+        This factory acts as a dispatcher, returning the most specialized
+        subclass for the given matrix type (e.g., Dense, Sparse).

-
+        It ALWAYS assumes the provided matrix is the **Galerkin** representation
+        of the operator. The user is responsible for ensuring the input matrix
+        is symmetric (or self-adjoint for ScipyLinOp).
+
+        Args:
+            domain: The operator's domain and codomain space.
+            matrix: The symmetric matrix representation.
+
+        Returns:
+            An instance of the most appropriate MatrixLinearOperator subclass.
+        """
+        # Dispatch to the appropriate subclass, always with galerkin=True
+        if isinstance(matrix, sp.dia_array):
+            diagonals_tuple = (matrix.data, matrix.offsets)
+            return DiagonalSparseMatrixLinearOperator(
+                domain, domain, diagonals_tuple, galerkin=True
+            )
+        elif isinstance(matrix, sp.sparray):
+            return SparseMatrixLinearOperator(domain, domain, matrix, galerkin=True)
+        elif isinstance(matrix, np.ndarray):
+            return DenseMatrixLinearOperator(domain, domain, matrix, galerkin=True)
+        elif isinstance(matrix, ScipyLinOp):
+            return MatrixLinearOperator(domain, domain, matrix, galerkin=True)
+        elif sp.issparse(matrix):
+            modern_array = sp.csr_array(matrix)
+            return SparseMatrixLinearOperator(
+                domain, domain, modern_array, galerkin=True
+            )
+        else:
+            raise TypeError(f"Unsupported matrix type: {type(matrix)}")

     @staticmethod
     def from_tensor_product(
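A minimal usage sketch of the new dispatchers (not taken from the diff itself; the `EuclideanSpace` import path is read off the hunks above, and the expected return types follow the documented dispatch rules):

import numpy as np
import scipy.sparse as sp

from pygeoinf.hilbert_space import EuclideanSpace
from pygeoinf.linear_operators import LinearOperator

X = EuclideanSpace(4)
m = np.diag([1.0, 2.0, 3.0, 4.0])

dense_op = LinearOperator.from_matrix(X, X, m)                 # -> DenseMatrixLinearOperator
sparse_op = LinearOperator.from_matrix(X, X, sp.csr_array(m))  # -> SparseMatrixLinearOperator
diag_op = LinearOperator.from_matrix(X, X, sp.dia_array(m))    # -> DiagonalSparseMatrixLinearOperator
sym_op = LinearOperator.self_adjoint_from_matrix(X, m)         # Galerkin form is always assumed here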
@@ -413,11 +446,6 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         else:
             return self._adjoint_base

-    @property
-    def thread_safe(self) -> bool:
-        """True if the operator's mapping is thread-safe."""
-        return self._thread_safe
-
     def matrix(
         self,
         /,
@@ -479,22 +507,34 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
             return self.domain.dual.to_components(xp)

         def matmat(xmat: np.ndarray) -> np.ndarray:
-
-            assert
-
-
-
-
+            _n, k = xmat.shape
+            assert _n == self.domain.dim
+
+            if not parallel:
+                ymat = np.zeros((self.codomain.dim, k))
+                for j in range(k):
+                    ymat[:, j] = matvec(xmat[:, j])
+                return ymat
+            else:
+                result_cols = Parallel(n_jobs=n_jobs)(
+                    delayed(matvec)(xmat[:, j]) for j in range(k)
+                )
+                return np.column_stack(result_cols)

         def rmatmat(ymat: np.ndarray) -> np.ndarray:
-
-            assert
-
-
-
-
+            _m, k = ymat.shape
+            assert _m == self.codomain.dim
+
+            if not parallel:
+                xmat = np.zeros((self.domain.dim, k))
+                for j in range(k):
+                    xmat[:, j] = rmatvec(ymat[:, j])
+                return xmat
+            else:
+                result_cols = Parallel(n_jobs=n_jobs)(
+                    delayed(rmatvec)(ymat[:, j]) for j in range(k)
+                )
+                return np.column_stack(result_cols)

         return ScipyLinOp(
             (self.codomain.dim, self.domain.dim),
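The rewritten `matmat`/`rmatmat` above apply `matvec` column by column, optionally through joblib. A self-contained sketch of that pattern with plain NumPy (the helper name is illustrative, not part of the package):

import numpy as np
from joblib import Parallel, delayed

def apply_columnwise(matvec, xmat, *, parallel=False, n_jobs=-1):
    """Apply a matrix-vector callable to each column of xmat, optionally in parallel."""
    _, k = xmat.shape
    if not parallel:
        return np.column_stack([matvec(xmat[:, j]) for j in range(k)])
    cols = Parallel(n_jobs=n_jobs)(delayed(matvec)(xmat[:, j]) for j in range(k))
    return np.column_stack(cols)

A = np.arange(12.0).reshape(3, 4)
X = np.ones((4, 5))
assert np.allclose(apply_columnwise(lambda v: A @ v, X), A @ X)
assert np.allclose(apply_columnwise(lambda v: A @ v, X, parallel=True, n_jobs=2), A @ X)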
@@ -504,6 +544,124 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
             rmatmat=rmatmat,
         )

+    def extract_diagonal(
+        self,
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> np.ndarray:
+        """
+        Computes the main diagonal of the operator's matrix representation.
+
+        This method is highly parallelizable and memory-efficient, as it
+        avoids forming the full dense matrix.
+
+        Args:
+            galerkin: If True, computes the diagonal of the Galerkin matrix.
+            parallel: If True, computes the entries in parallel.
+            n_jobs: Number of parallel jobs to use.
+
+        Returns:
+            A NumPy array containing the diagonal entries.
+        """
+
+        dim = min(self.domain.dim, self.codomain.dim)
+        jobs = n_jobs if parallel else 1
+
+        def compute_entry(i: int) -> float:
+            """Worker function to compute a single diagonal entry."""
+            e_i = self.domain.basis_vector(i)
+            L_e_i = self(e_i)
+
+            if galerkin:
+                return self.domain.inner_product(e_i, L_e_i)
+            else:
+                return self.codomain.to_components(L_e_i)[i]
+
+        diagonal_entries = Parallel(n_jobs=jobs)(
+            delayed(compute_entry)(i) for i in range(dim)
+        )
+        return np.array(diagonal_entries)
+
+    def extract_diagonals(
+        self,
+        offsets: List[int],
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> Tuple[np.ndarray, List[int]]:
+        """
+        Computes specified diagonals of the operator's matrix representation.
+
+        This is a memory-efficient and parallelizable method that computes
+        the matrix one column at a time.
+
+        Args:
+            offsets: A list of diagonal offsets to extract (e.g., [0] for
+                the main diagonal, [-1, 0, 1] for a tridiagonal matrix).
+            galerkin: If True, computes the diagonals of the Galerkin matrix.
+            parallel: If True, computes columns in parallel.
+            n_jobs: Number of parallel jobs to use.
+
+        Returns:
+            A tuple containing:
+                - A NumPy array where each row is a diagonal.
+                - The list of offsets.
+            This format is compatible with scipy.sparse.spdiags.
+        """
+        dim = min(self.domain.dim, self.codomain.dim)
+        jobs = n_jobs if parallel else 1
+
+        # Prepare a thread-safe dictionary to store results
+
+        results: Dict[int, Dict[int, float]] = defaultdict(dict)
+
+        def compute_column_entries(j: int) -> Dict[int, Dict[int, float]]:
+            """
+            Worker function to compute all needed entries for column j.
+            """
+            e_j = self.domain.basis_vector(j)
+            L_e_j = self(e_j)
+
+            col_results = defaultdict(dict)
+
+            for k in offsets:
+                i = j - k
+                if 0 <= i < dim:
+                    if galerkin:
+                        e_i = self.domain.basis_vector(i)
+                        val = self.domain.inner_product(e_i, L_e_j)
+                    else:
+                        val = self.codomain.to_components(L_e_j)[i]
+                    col_results[k][i] = val
+            return col_results
+
+        # Run the computation in parallel
+        column_data = Parallel(n_jobs=jobs)(
+            delayed(compute_column_entries)(j) for j in range(dim)
+        )
+
+        # Aggregate results from the parallel computation
+        for col_dict in column_data:
+            for k, entries in col_dict.items():
+                results[k].update(entries)
+
+        # Format the results for spdiags
+        # The array must have padding for shorter off-diagonals.
+        diagonals_array = np.zeros((len(offsets), dim))
+        for idx, k in enumerate(offsets):
+            diag_entries = results[k]
+            for i, val in diag_entries.items():
+                j = i + k
+                if 0 <= j < dim:
+                    diagonals_array[idx, j] = val
+
+        return diagonals_array, offsets
+
     def random_svd(
         self,
         size_estimate: int,
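The `(diagonals_array, offsets)` pair returned above uses the padded, column-aligned layout of SciPy's dia storage, so a banded approximation of an operator can be rebuilt directly. A small sketch of that layout with plain NumPy (the pygeoinf call is shown only as a comment, since it needs an operator instance):

import numpy as np
import scipy.sparse as sp

# With a pygeoinf operator `op` one would write, e.g.:
#     data, offsets = op.extract_diagonals([-1, 0, 1], galerkin=True)
#     banded = sp.dia_array((data, offsets), shape=(op.codomain.dim, op.domain.dim))

# The same padded layout, built by hand for a known matrix:
A = np.arange(16.0).reshape(4, 4)
offsets = [-1, 0, 1]
data = np.zeros((len(offsets), 4))
for idx, k in enumerate(offsets):
    d = np.diag(A, k=k)
    if k >= 0:
        data[idx, k:k + len(d)] = d
    else:
        data[idx, :len(d)] = d

banded = sp.dia_array((data, offsets), shape=A.shape)
assert np.allclose(banded.toarray(), np.tril(np.triu(A, -1), 1))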
@@ -517,7 +675,11 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         block_size: int = 10,
         parallel: bool = False,
         n_jobs: int = -1,
-    ) -> Tuple[
+    ) -> Tuple[
+        DenseMatrixLinearOperator,
+        DiagonalSparseMatrixLinearOperator,
+        DenseMatrixLinearOperator,
+    ]:
         """
         Computes an approximate SVD using a randomized algorithm.

@@ -540,9 +702,9 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
             n_jobs: Number of jobs for parallelism.

         Returns:
-            left (
-            singular_values (
-            right (
+            left (DenseMatrixLinearOperator): The left singular vector matrix.
+            singular_values (DiagonalSparseMatrixLinearOperator): The singular values.
+            right (DenseMatrixLinearOperator): The right singular vector matrix.

         Notes:
             The right factor is in transposed form. This means the original
@@ -572,7 +734,9 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         )

         euclidean = EuclideanSpace(qr_factor.shape[1])
-        diagonal =
+        diagonal = DiagonalSparseMatrixLinearOperator.from_diagonal_values(
+            euclidean, euclidean, singular_values
+        )

         if galerkin:
             right = LinearOperator.from_matrix(
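`from_diagonal_values`, used here to wrap the singular values, is defined further down in this diff; a minimal sketch of it in isolation (import paths as in the earlier sketch):

import numpy as np

from pygeoinf.hilbert_space import EuclideanSpace
from pygeoinf.linear_operators import DiagonalSparseMatrixLinearOperator

E = EuclideanSpace(3)
sigma = DiagonalSparseMatrixLinearOperator.from_diagonal_values(
    E, E, np.array([3.0, 2.0, 0.5])
)
# sigma acts as the diagonal matrix diag(3.0, 2.0, 0.5) on the 3-dimensional space E.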
@@ -603,7 +767,7 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         block_size: int = 10,
         parallel: bool = False,
         n_jobs: int = -1,
-    ) -> Tuple[
+    ) -> Tuple[DenseMatrixLinearOperator, DiagonalSparseMatrixLinearOperator]:
         """
         Computes an approximate eigen-decomposition using a randomized algorithm.

@@ -625,8 +789,8 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
             n_jobs: Number of jobs for parallelism.

         Returns:
-            expansion (
-            eigenvaluevalues (
+            expansion (DenseMatrixLinearOperator): Mapping from coefficients in eigen-basis to vectors.
+            eigenvaluevalues (DiagonalSparseMatrixLinearOperator): The eigenvalues.

         """
         from .hilbert_space import EuclideanSpace
@@ -650,7 +814,9 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):

         eigenvectors, eigenvalues = rm_eig(matrix, qr_factor)
         euclidean = EuclideanSpace(qr_factor.shape[1])
-        diagonal =
+        diagonal = DiagonalSparseMatrixLinearOperator.from_diagonal_values(
+            euclidean, euclidean, eigenvalues
+        )

         expansion = LinearOperator.from_matrix(
             euclidean, self.domain, eigenvectors, galerkin=True
@@ -670,7 +836,7 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         block_size: int = 10,
         parallel: bool = False,
         n_jobs: int = -1,
-    ) ->
+    ) -> DenseMatrixLinearOperator:
         """
         Computes an approximate Cholesky decomposition for a positive-definite
         self-adjoint operator using a randomized algorithm.
@@ -693,7 +859,7 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
             n_jobs: Number of jobs for parallelism.

         Returns:
-            factor (
+            factor (DenseMatrixLinearOperator): A linear operator from a Euclidean space
                 into the domain of the operator.

         Notes:
@@ -915,85 +1081,743 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
         return self.matrix(dense=True).__str__()


-class
-    """
+class MatrixLinearOperator(LinearOperator):
+    """
+    A sub-class of LinearOperator for which the operator's action is
+    defined internally through its matrix representation.
+
+    This matrix can be either a dense numpy matrix or a
+    scipy LinearOperator.
+    """
+
+    def __init__(
+        self,
+        domain: HilbertSpace,
+        codomain: HilbertSpace,
+        matrix: Union[np.ndarray, ScipyLinOp],
+        /,
+        *,
+        galerkin=False,
+    ):
+        """
+        Args:
+            domain: The domain of the operator.
+            codomain: The codomain of the operator.
+            matrix: Matrix representation of the linear operator in either standard
+                or Galerkin form.
+            galerkin: If True, the Galerkin representation is used. Default is False.
+        """
+        assert matrix.shape == (codomain.dim, domain.dim)
+
+        self._matrix = matrix
+        self._is_dense = isinstance(matrix, np.ndarray)
+        self._galerkin = galerkin
+
+        if galerkin:
+
+            def mapping(x: Any) -> Any:
+                cx = domain.to_components(x)
+                cyp = matrix @ cx
+                yp = codomain.dual.from_components(cyp)
+                return codomain.from_dual(yp)
+
+            def adjoint_mapping(y: Any) -> Any:
+                cy = codomain.to_components(y)
+                cxp = matrix.T @ cy
+                xp = domain.dual.from_components(cxp)
+                return domain.from_dual(xp)
+
+            super().__init__(domain, codomain, mapping, adjoint_mapping=adjoint_mapping)

-
-
-
+        else:
+
+            def mapping(x: Any) -> Any:
+                cx = domain.to_components(x)
+                cy = matrix @ cx
+                return codomain.from_components(cy)
+
+            def dual_mapping(yp: Any) -> Any:
+                cyp = codomain.dual.to_components(yp)
+                cxp = matrix.T @ cyp
+                return domain.dual.from_components(cxp)
+
+            super().__init__(domain, codomain, mapping, dual_mapping=dual_mapping)
+
+    @property
+    def is_dense(self) -> bool:
+        """
+        Returns True if the matrix representation is stored internally in dense form.
+        """
+        return self._is_dense
+
+    @property
+    def is_galerkin(self) -> bool:
+        """
+        Returns True if the matrix representation is stored in Galerkin form.
+        """
+        return self._galerkin
+
+    def _compute_dense_matrix(
+        self, galerkin: bool, parallel: bool, n_jobs: int
+    ) -> np.ndarray:
+        """
+        Overloaded method to efficiently compute the dense matrix.
+        """
+
+        if galerkin == self.is_galerkin and self.is_dense:
+            return self._matrix
+        else:
+            return super()._compute_dense_matrix(galerkin, parallel, n_jobs)

-
+    def extract_diagonal(
+        self,
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> np.ndarray:
+        """
+        Overload for efficiency.
+        """
+
+        if galerkin == self.is_galerkin and self.is_dense:
+            return self._matrix.diagonal()
+        else:
+            return super().extract_diagonal(
+                galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
+            )
+
+    def extract_diagonals(
+        self,
+        offsets: List[int],
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> Tuple[np.ndarray, List[int]]:
+        """
+        Overrides the base method for efficiency by extracting diagonals directly
+        from the stored dense matrix when possible.
+        """
+
+        if self.is_dense and galerkin == self.is_galerkin:
+            dim = self.domain.dim
+
+            diagonals_array = np.zeros((len(offsets), dim))
+
+            for i, k in enumerate(offsets):
+                diag_k = np.diag(self._matrix, k=k)
+
+                if k >= 0:
+                    diagonals_array[i, k : k + len(diag_k)] = diag_k
+                else:
+                    diagonals_array[i, : len(diag_k)] = diag_k
+
+            return diagonals_array, offsets
+
+        else:
+            return super().extract_diagonals(
+                offsets, galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
+            )
+
+
+class DenseMatrixLinearOperator(MatrixLinearOperator):
+    """
+    A specialisation of the MatrixLinearOperator class to instances where
+    the matrix representation is always provided as a numpy array.
+
+    This class provides some additional methods for component-wise access.
     """

     def __init__(
         self,
         domain: HilbertSpace,
         codomain: HilbertSpace,
-
+        matrix: np.ndarray,
+        /,
+        *,
+        galerkin=False,
+    ):
+        """
+        domain: The domain of the operator.
+        codomain: The codomain of the operator.
+        matrix: Matrix representation of the linear operator in either standard
+            or Galerkin form.
+        galerkin: If True, the Galerkin representation is used. Default is False.
+        """
+
+        if not isinstance(matrix, np.ndarray):
+            raise ValueError("Matrix must be input in dense form.")
+
+        super().__init__(domain, codomain, matrix, galerkin=galerkin)
+
+    @staticmethod
+    def from_linear_operator(
+        operator: LinearOperator,
         /,
         *,
         galerkin: bool = False,
-
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> DenseMatrixLinearOperator:
         """
-
+        Converts a LinearOperator into a DenseMatrixLinearOperator by forming its dense matrix representation.

         Args:
-
-
-
-
-            galerkin (bool): If True, use the Galerkin representation.
+            operator: The operator to be converted.
+            galerkin: If True, the Galerkin representation is used. Default is False.
+            parallel: If True, dense matrix calculation is done in parallel. Default is False.
+            n_jobs: Number of jobs used for parallel calculations. Default is -1.
         """

-
-
-
-
-
-
+        if isinstance(operator, DenseMatrixLinearOperator):
+            return operator
+
+        domain = operator.domain
+        codomain = operator.codomain
+
+        matrix = operator.matrix(
+            dense=True, galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
         )
-
+
+        return DenseMatrixLinearOperator(domain, codomain, matrix, galerkin=galerkin)
+
+    def __getitem__(self, key: tuple[int, int] | int | slice) -> float | np.ndarray:
+        """
+        Provides direct, component-wise access to the underlying matrix.
+
+        This allows for intuitive slicing and indexing, like `op[i, j]` or `op[0, :]`.
+        Note: The access is on the stored matrix, which may be in either
+        standard or Galerkin form depending on how the operator was initialized.
+        """
+        return self._matrix[key]
+
+
+class SparseMatrixLinearOperator(MatrixLinearOperator):
+    """
+    A specialization for operators represented by a modern SciPy sparse array.
+
+    This class requires a `scipy.sparse.sparray` object (e.g., csr_array)
+    and provides optimized methods that delegate to efficient SciPy routines.
+
+    Upon initialization, the internal array is converted to the CSR
+    (Compressed Sparse Row) format to ensure consistently fast matrix-vector
+    products and row-slicing operations.
+    """
+
+    def __init__(
+        self,
+        domain: HilbertSpace,
+        codomain: HilbertSpace,
+        matrix: sp.sparray,
+        /,
+        *,
+        galerkin: bool = False,
+    ):
+        """
+        Args:
+            domain: The domain of the operator.
+            codomain: The codomain of the operator.
+            matrix: The sparse array representation of the linear operator.
+                Must be a modern sparray object (e.g., csr_array).
+            galerkin: If True, the matrix is in Galerkin form. Defaults to False.
+        """
+        # Strict check for the modern sparse array type
+        if not isinstance(matrix, sp.sparray):
+            raise TypeError(
+                "Matrix must be a modern SciPy sparray object (e.g., csr_array)."
+            )
+
+        super().__init__(domain, codomain, matrix, galerkin=galerkin)
+        self._matrix = self._matrix.asformat("csr")
+
+    def __getitem__(self, key):
+        """Provides direct component access using SciPy's sparse indexing."""
+        return self._matrix[key]
+
+    def _compute_dense_matrix(
+        self, galerkin: bool, parallel: bool, n_jobs: int
+    ) -> np.ndarray:
+        """
+        Overrides the base method to efficiently compute the dense matrix.
+        """
+        # ⚡️ Fast path: Use the highly optimized .toarray() method.
+        if galerkin == self.is_galerkin:
+            return self._matrix.toarray()
+
+        # Fallback path for when a basis conversion is needed.
+        else:
+            return super()._compute_dense_matrix(galerkin, parallel, n_jobs)
+
+    def extract_diagonal(
+        self,
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> np.ndarray:
+        """
+        Overrides the base method to efficiently extract the main diagonal.
+        """
+        if galerkin == self.is_galerkin:
+            return self._matrix.diagonal(k=0)
+        else:
+            return super().extract_diagonal(
+                galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
+            )
+
+    def extract_diagonals(
+        self,
+        offsets: List[int],
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> Tuple[np.ndarray, List[int]]:
+        """
+        Overrides the base method for efficiency by extracting diagonals
+        directly from the stored sparse array.
+        """
+        if galerkin != self.is_galerkin:
+            return super().extract_diagonals(
+                offsets, galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
+            )
+
+        dim = self.domain.dim
+        diagonals_array = np.zeros((len(offsets), dim))
+
+        for i, k in enumerate(offsets):
+            # Use the sparse array's fast .diagonal() method
+            diag_k = self._matrix.diagonal(k=k)
+
+            # Place the raw diagonal into the padded output array
+            if k >= 0:
+                diagonals_array[i, k : k + len(diag_k)] = diag_k
+            else:
+                diagonals_array[i, : len(diag_k)] = diag_k
+
+        return diagonals_array, offsets
+
+
+class DiagonalSparseMatrixLinearOperator(SparseMatrixLinearOperator):
+    """
+    A highly specialized operator for matrices defined purely by a set of
+    non-zero diagonals.
+
+    This class internally stores the operator using a `scipy.sparse.dia_array`
+    for maximum efficiency in storage and matrix-vector products. It provides
+    extremely fast methods for extracting diagonals, as this is its native
+    storage format.
+
+    A key feature of this class is its support for **functional calculus**. It
+    dynamically proxies element-wise mathematical functions (e.g., `.sqrt()`,
+    `.log()`, `abs()`, `**`) to the underlying sparse array. For reasons of
+    mathematical correctness, these operations are restricted to operators that
+    are **strictly diagonal** (i.e., have only a non-zero main diagonal) and
+    will raise a `NotImplementedError` otherwise.
+
+    Aggregation methods that do not return a new operator (e.g., `.sum()`)
+    are not restricted and can be used on any multi-diagonal operator.
+
+    Class Methods
+    -------------
+    from_diagonal_values:
+        Constructs a strictly diagonal operator from a 1D array of values.
+    from_operator:
+        Creates a diagonal approximation of another LinearOperator.
+
+    Properties
+    ----------
+    offsets:
+        The array of stored diagonal offsets.
+    is_strictly_diagonal:
+        True if the operator only has a non-zero main diagonal.
+    inverse:
+        The inverse of a strictly diagonal operator.
+    sqrt:
+        The square root of a strictly diagonal operator.
+    """
+
+    def __init__(
+        self,
+        domain: HilbertSpace,
+        codomain: HilbertSpace,
+        diagonals: Tuple[np.ndarray, List[int]],
+        /,
+        *,
+        galerkin: bool = False,
+    ):
+        """
+        Args:
+            domain: The domain of the operator.
+            codomain: The codomain of the operator.
+            diagonals: A tuple `(data, offsets)` where `data` is a 2D array
+                of diagonal values and `offsets` is a list of their
+                positions. This is the native format for a dia_array.
+            galerkin: If True, the matrix is in Galerkin form. Defaults to False.
+        """
+        shape = (codomain.dim, domain.dim)
+        dia_array = sp.dia_array(diagonals, shape=shape)
+
+        MatrixLinearOperator.__init__(
+            self, domain, codomain, dia_array, galerkin=galerkin
+        )
+
+    @classmethod
+    def from_operator(
+        cls, operator: LinearOperator, offsets: List[int], /, *, galerkin: bool = True
+    ) -> DiagonalSparseMatrixLinearOperator:
+        """
+        Creates a diagonal approximation of another LinearOperator.
+
+        This factory method works by calling the source operator's
+        `.extract_diagonals()` method and using the result to construct a
+        new, highly efficient DiagonalSparseMatrixLinearOperator.
+
+        Args:
+            operator: The source operator to approximate.
+            offsets: The list of diagonal offsets to extract and keep.
+            galerkin: Specifies which matrix representation to use.
+
+        Returns:
+            A new DiagonalSparseMatrixLinearOperator.
+        """
+        diagonals_data, extracted_offsets = operator.extract_diagonals(
+            offsets, galerkin=galerkin
+        )
+        return cls(
             operator.domain,
             operator.codomain,
-
-
+            (diagonals_data, extracted_offsets),
+            galerkin=galerkin,
         )

-    @
-    def
-
-
-
-
+    @classmethod
+    def from_diagonal_values(
+        cls,
+        domain: HilbertSpace,
+        codomain: HilbertSpace,
+        diagonal_values: np.ndarray,
+        /,
+        *,
+        galerkin: bool = False,
+    ) -> "DiagonalSparseMatrixLinearOperator":
+        """
+        Constructs a purely diagonal operator from a 1D array of values.

-        This
-
-        `op.function(lambda x: 1/x)` computes the inverse.
+        This provides a convenient way to create an operator with non-zero
+        entries only on its main diagonal (offset k=0).

         Args:
-
+            domain: The domain of the operator.
+            codomain: The codomain of the operator. Must have the same dimension.
+            diagonal_values: A 1D NumPy array of the values for the main diagonal.
+            galerkin: If True, the operator is in Galerkin form.

         Returns:
-            A new
+            A new DiagonalSparseMatrixLinearOperator.
         """
-
-
+        if domain.dim != codomain.dim or domain.dim != len(diagonal_values):
+            raise ValueError(
+                "Domain, codomain, and diagonal_values must all have the same dimension."
+            )
+
+        # Reshape the 1D array of values into the 2D `data` array format
+        diagonals_data = diagonal_values.reshape(1, -1)
+        offsets = [0]
+
+        return cls(domain, codomain, (diagonals_data, offsets), galerkin=galerkin)

     @property
-    def
+    def offsets(self) -> np.ndarray:
+        """Returns the array of stored diagonal offsets."""
+        return self._matrix.offsets
+
+    @property
+    def is_strictly_diagonal(self) -> bool:
+        """
+        True if the operator only has a non-zero main diagonal (offset=0).
+        """
+        return len(self.offsets) == 1 and self.offsets[0] == 0
+
+    @property
+    def inverse(self) -> "DiagonalSparseMatrixLinearOperator":
         """
         The inverse of the operator, computed via functional calculus.
-        Requires
+        Requires the operator to be strictly diagonal with no zero entries.
         """
-
-
+        if not self.is_strictly_diagonal:
+            raise NotImplementedError(
+                "Inverse is only implemented for strictly diagonal operators."
+            )
+
+        if np.any(self._matrix.diagonal(k=0) == 0):
+            raise ValueError("Cannot invert an operator with zeros on the diagonal.")
+
+        return self**-1

     @property
-    def sqrt(self) ->
+    def sqrt(self) -> "DiagonalSparseMatrixLinearOperator":
         """
         The square root of the operator, computed via functional calculus.
-        Requires
+        Requires the operator to be strictly diagonal with non-negative entries.
+        """
+
+        if np.any(self._matrix.data < 0):
+            raise ValueError(
+                "Cannot take the square root of an operator with negative entries."
+            )
+
+        return self.__getattr__("sqrt")()
+
+    def extract_diagonals(
+        self,
+        offsets: List[int],
+        /,
+        *,
+        galerkin: bool = True,
+        # parallel and n_jobs are ignored but kept for signature consistency
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> Tuple[np.ndarray, List[int]]:
+        """
+        Overrides the base method for extreme efficiency.
+
+        This operation is nearly free, as it involves selecting the requested
+        diagonals from the data already stored in the native format.
+        """
+        if galerkin != self.is_galerkin:
+            return super().extract_diagonals(offsets, galerkin=galerkin)
+
+        # Create a result array and fill it with the requested stored diagonals
+        result_diagonals = np.zeros((len(offsets), self.domain.dim))
+
+        # Create a mapping from stored offset to its data row for quick lookup
+        stored_diagonals = dict(zip(self.offsets, self._matrix.data))
+
+        for i, k in enumerate(offsets):
+            if k in stored_diagonals:
+                result_diagonals[i, :] = stored_diagonals[k]
+
+        return result_diagonals, offsets
+
+    def __getattr__(self, name: str):
+        """
+        Dynamically proxies method calls to the underlying dia_array.
+
+        For element-wise mathematical functions that return a new operator,
+        this method enforces that the operator must be strictly diagonal.
+        """
+        attr = getattr(self._matrix, name)
+
+        if callable(attr):
+
+            def wrapper(*args, **kwargs):
+                result = attr(*args, **kwargs)
+
+                if isinstance(result, sp.sparray):
+                    if not self.is_strictly_diagonal:
+                        raise NotImplementedError(
+                            f"Element-wise function '{name}' is only defined for "
+                            "strictly diagonal operators."
+                        )
+
+                    return DiagonalSparseMatrixLinearOperator(
+                        self.domain,
+                        self.codomain,
+                        (result.data, result.offsets),
+                        galerkin=self.is_galerkin,
+                    )
+                else:
+                    return result
+
+            return wrapper
+        else:
+            return attr
+
+    def __abs__(self):
+        """Explicitly handle the built-in abs() function."""
+        return self.__getattr__("__abs__")()
+
+    def __pow__(self, power):
+        """Explicitly handle the power operator (**)."""
+        return self.__getattr__("__pow__")(power)
+
+
+class NormalSumOperator(LinearOperator):
+    """
+    Represents a self-adjoint operator of the form N = A @ Q @ A.adjoint + B.
+
+    The operators Q and B are expected to be self-adjoint for the resulting
+    operator to be mathematically correct.
+
+    Q and B are optional. If Q is None, it defaults to the identity operator.
+    If B is None, it defaults to the zero operator.
+
+    This class uses operator algebra for a concise definition and provides an
+    optimized, parallelizable method for computing its dense Galerkin matrix.
+    """
+
+    def __init__(
+        self,
+        A: LinearOperator,
+        Q: Optional[LinearOperator] = None,
+        B: Optional[LinearOperator] = None,
+    ) -> None:
+
+        op_domain = A.codomain
+
+        if Q is None:
+            Q = A.domain.identity_operator()
+
+        if B is None:
+            B = op_domain.zero_operator()
+
+        if A.domain != Q.domain:
+            raise ValueError("The domain of A must match the domain of Q.")
+        if op_domain != B.domain:
+            raise ValueError("The domain of B must match the codomain of A.")
+
+        self._A = A
+        self._Q = Q
+        self._B = B
+
+        composite_op = self._A @ self._Q @ self._A.adjoint + self._B
+
+        super().__init__(
+            composite_op.domain,
+            composite_op.codomain,
+            composite_op,
+            adjoint_mapping=composite_op,
+        )
+
+    def _compute_dense_matrix(
+        self, galerkin: bool, parallel: bool, n_jobs: int
+    ) -> np.ndarray:
+        """
+        Overloaded method using the matrix-free approach for Q and a cleaner
+        implementation leveraging the base class's methods.
         """
-
-
+        if not galerkin:
+            return super()._compute_dense_matrix(galerkin, parallel, n_jobs)
+
+        domain_Y = self._A.codomain
+        dim = self.domain.dim
+        jobs = n_jobs if parallel else 1
+
+        a_star_mat = self._A.adjoint.matrix(
+            dense=True, galerkin=False, parallel=parallel, n_jobs=n_jobs
+        )
+
+        v_vectors = [domain_Y.from_components(a_star_mat[:, j]) for j in range(dim)]
+        w_vectors = Parallel(n_jobs=jobs)(delayed(self._Q)(v_j) for v_j in v_vectors)
+
+        def compute_row(i: int) -> np.ndarray:
+            """Computes the i-th row of the inner product matrix."""
+            v_i = v_vectors[i]
+            return np.array([domain_Y.inner_product(v_i, w_j) for w_j in w_vectors])
+
+        rows = Parallel(n_jobs=jobs)(delayed(compute_row)(i) for i in range(dim))
+        m_aqa_mat = np.vstack(rows)
+
+        b_mat = self._B.matrix(
+            dense=True, galerkin=True, parallel=parallel, n_jobs=n_jobs
+        )
+
+        return m_aqa_mat + b_mat
+
+    def extract_diagonal(
+        self,
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> np.ndarray:
+        """Overrides base method for efficiency."""
+        if not galerkin:
+            return super().extract_diagonal(
+                galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
+            )
+
+        diag_B = self._B.extract_diagonal(
+            galerkin=True, parallel=parallel, n_jobs=n_jobs
+        )
+
+        dim = self.domain.dim
+        jobs = n_jobs if parallel else 1
+
+        def compute_entry(i: int) -> float:
+            e_i = self.domain.basis_vector(i)
+            v_i = self._A.adjoint(e_i)
+            w_i = self._Q(v_i)
+            return self._A.domain.inner_product(v_i, w_i)
+
+        diag_AQA_T = Parallel(n_jobs=jobs)(
+            delayed(compute_entry)(i) for i in range(dim)
+        )
+
+        return np.array(diag_AQA_T) + diag_B
+
+    def extract_diagonals(
+        self,
+        offsets: List[int],
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+    ) -> Tuple[np.ndarray, List[int]]:
+        """Overrides base method for efficiency."""
+        if not galerkin:
+            return super().extract_diagonals(
+                offsets, galerkin=galerkin, parallel=parallel, n_jobs=n_jobs
+            )
+
+        diagonals_B, _ = self._B.extract_diagonals(
+            offsets, galerkin=True, parallel=parallel, n_jobs=n_jobs
+        )
+
+        dim = self.domain.dim
+        jobs = n_jobs if parallel else 1
+
+        # Pre-compute A*e_i for all i
+        v_vectors = Parallel(n_jobs=jobs)(
+            delayed(self._A.adjoint)(self.domain.basis_vector(i)) for i in range(dim)
+        )
+
+        def compute_column_entries(j: int) -> Dict[int, Dict[int, float]]:
+            col_results = defaultdict(dict)
+            v_j = v_vectors[j]
+            w_j = self._Q(v_j)
+
+            for k in offsets:
+                i = j - k
+                if 0 <= i < dim:
+                    v_i = v_vectors[i]
+                    val = self._A.domain.inner_product(v_i, w_j)
+                    col_results[k][i] = val
+            return col_results
+
+        column_data = Parallel(n_jobs=jobs)(
+            delayed(compute_column_entries)(j) for j in range(dim)
+        )
+
+        results: Dict[int, Dict[int, float]] = defaultdict(dict)
+        for col_dict in column_data:
+            for k, entries in col_dict.items():
+                results[k].update(entries)
+
+        diagonals_array = np.zeros((len(offsets), dim))
+        for idx, k in enumerate(offsets):
+            diag_entries = results[k]
+            for i, val in diag_entries.items():
+                j = i + k
+                if 0 <= j < dim:
+                    diagonals_array[idx, j] = val
+
+        return diagonals_array + diagonals_B, offsets
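A short sketch of how the new operator classes compose (class and method names as introduced in this hunk; the `EuclideanSpace` import path follows the earlier hunks):

import numpy as np

from pygeoinf.hilbert_space import EuclideanSpace
from pygeoinf.linear_operators import (
    DiagonalSparseMatrixLinearOperator,
    NormalSumOperator,
)

E = EuclideanSpace(3)

# A strictly diagonal operator supports the documented functional calculus.
D = DiagonalSparseMatrixLinearOperator.from_diagonal_values(
    E, E, np.array([4.0, 9.0, 16.0])
)
R = D.sqrt        # diagonal entries 2, 3, 4
Dinv = D.inverse  # diagonal entries 1/4, 1/9, 1/16

# N = A @ Q @ A.adjoint + B, with Q and B defaulting to the identity and zero
# operators; `A` here stands for any existing LinearOperator instance.
# N = NormalSumOperator(A)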