pyMOTO 1.2.1__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyMOTO-1.2.1.dist-info → pyMOTO-1.4.0.dist-info}/METADATA +7 -8
- pyMOTO-1.4.0.dist-info/RECORD +29 -0
- {pyMOTO-1.2.1.dist-info → pyMOTO-1.4.0.dist-info}/WHEEL +1 -1
- pymoto/__init__.py +19 -13
- pymoto/common/domain.py +75 -0
- pymoto/common/dyadcarrier.py +33 -4
- pymoto/common/mma.py +83 -53
- pymoto/core_objects.py +117 -113
- pymoto/modules/aggregation.py +209 -0
- pymoto/modules/assembly.py +202 -41
- pymoto/modules/complex.py +3 -3
- pymoto/modules/filter.py +171 -24
- pymoto/modules/generic.py +12 -1
- pymoto/modules/io.py +22 -11
- pymoto/modules/linalg.py +24 -118
- pymoto/modules/scaling.py +4 -4
- pymoto/routines.py +32 -15
- pymoto/solvers/__init__.py +14 -0
- pymoto/solvers/auto_determine.py +108 -0
- pymoto/{common/solvers_dense.py → solvers/dense.py} +90 -70
- pymoto/solvers/iterative.py +361 -0
- pymoto/solvers/matrix_checks.py +56 -0
- pymoto/solvers/solvers.py +253 -0
- pymoto/{common/solvers_sparse.py → solvers/sparse.py} +41 -29
- pyMOTO-1.2.1.dist-info/RECORD +0 -24
- pymoto/common/solvers.py +0 -236
- {pyMOTO-1.2.1.dist-info → pyMOTO-1.4.0.dist-info}/LICENSE +0 -0
- {pyMOTO-1.2.1.dist-info → pyMOTO-1.4.0.dist-info}/top_level.txt +0 -0
- {pyMOTO-1.2.1.dist-info → pyMOTO-1.4.0.dist-info}/zip-safe +0 -0
pymoto/{common/solvers_dense.py → solvers/dense.py}
@@ -1,7 +1,8 @@
 import warnings
 import numpy as np
 import scipy.linalg as spla # Dense matrix solvers
-from .
+from .matrix_checks import matrix_is_hermitian, matrix_is_diagonal
+from .solvers import LinearSolver


 class SolverDiagonal(LinearSolver):
@@ -11,24 +12,17 @@ class SolverDiagonal(LinearSolver):
         self.diag = A.diagonal()
         return self

-    def solve(self, rhs):
+    def solve(self, rhs, x0=None, trans='N'):
         r""" Solve using the diagonal only, by :math:`x_i = b_i / A_{ii}`

         The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
         :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
         """
+        d = self.diag.conj() if trans == 'H' else self.diag
         if rhs.ndim == 1:
-            return rhs /
+            return rhs / d
         else:
-            return rhs /
-
-    def adjoint(self, rhs):
-        r""" Solve using the diagonal only, by :math:`x_i = b_i / A_{ii}^*`
-
-        The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
-        :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
-        """
-        return self.solve(rhs.conj()).conj()
+            return rhs / d[..., None]


 # Dense QR solver
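The hunks above and below come from the dense-solver module, which the file list shows being moved from pymoto/common/solvers_dense.py to pymoto/solvers/dense.py. The visible interface change is that the separate `adjoint()` methods are removed and every solver now exposes `solve(rhs, x0=None, trans='N')`, where `trans` selects the normal ('N'), transposed ('T') or adjoint ('H') system. A minimal sketch of the new call pattern (illustration only; it assumes `SolverDiagonal` can be constructed without a matrix and chained through `update()`, which returns `self` in the hunk above):

```python
import numpy as np
from pymoto.solvers.dense import SolverDiagonal  # module path taken from the rename in the file list

A = np.diag([1.0 + 1j, 2.0, 3.0 - 2j])
b = np.array([1.0, 2.0, 3.0], dtype=complex)

solver = SolverDiagonal().update(A)   # update() returns self
x = solver.solve(b)                   # solves A x = b
xh = solver.solve(b, trans='H')       # solves A^H x = b (previously solver.adjoint(b))

assert np.allclose(A @ x, b)
assert np.allclose(A.conj().T @ xh, b)
```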
@@ -42,23 +36,31 @@ class SolverDenseQR(LinearSolver):
         self.q, self.r = spla.qr(A)
         return self

-    def solve(self, rhs):
-        r""" Solves the linear system of equations
-        :math:`\mathbf{x} = \mathbf{R}^{-1}\mathbf{Q}^\text{H}\mathbf{b}`.
-
-        The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
-        :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
-        """
-        return spla.solve_triangular(self.r, self.q.T.conj()@rhs)
+    def solve(self, rhs, x0=None, trans='N'):
+        r""" Solves the linear system of equations using the QR factorization.

-
-
-
+        ======= ================= =====================
+        `trans` Equation          Solution of :math:`x`
+        ------- ----------------- ---------------------
+        `N`     :math:`A x = b`   :math:`R^{-1} Q^H b`
+        `T`     :math:`A^T x = b` :math:`Q^* R^{-T} b`
+        `H`     :math:`A^H x = b` :math:`Q R^{-H} b`
+        ======= ================= =====================

         The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
         :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
         """
-
+        if trans == 'N':
+            # A = Q R -> inv(A) = inv(R) inv(Q) = inv(R) Q^H
+            return spla.solve_triangular(self.r, self.q.T.conj() @ rhs)
+        elif trans == 'T':
+            # A^T = R^T Q^T -> inv(A^T) = inv(Q^T) inv(R^T) = conj(Q) inv(R^T)
+            return self.q.conj() @ spla.solve_triangular(self.r, rhs, trans='T')
+        elif trans == 'H':
+            # A^H = R^H Q^H -> inv(A^H) = inv(Q^H) inv(R^H) = Q inv(R^H)
+            return self.q @ spla.solve_triangular(self.r, rhs, trans='C')
+        else:
+            raise TypeError("Only N, T, and H transposition is possible")


 # Dense LU solver
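The 'T' and 'H' branches above rely on Q being unitary, so that :math:`(Q^T)^{-1} = Q^*` and :math:`(Q^H)^{-1} = Q`. A standalone numerical check of the identity used in the 'T' branch, using only numpy and scipy rather than the package itself:

```python
import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5)) + 1j * rng.standard_normal((5, 5))
b = rng.standard_normal(5) + 1j * rng.standard_normal(5)

q, r = spla.qr(A)                                      # A = Q R with Q unitary
x = q.conj() @ spla.solve_triangular(r, b, trans='T')  # x = Q^* R^{-T} b
assert np.allclose(A.T @ x, b)                         # indeed solves A^T x = b
```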
@@ -71,24 +73,34 @@ class SolverDenseLU(LinearSolver):
         self.p, self.l, self.u = spla.lu(A)
         return self

-    def solve(self, rhs):
-        r""" Solves the linear system of equations
-        substitution of :math:`\mathbf{x} = \mathbf{U}^{-1}\mathbf{L}^{-1}\mathbf{b}`.
+    def solve(self, rhs, x0=None, trans='N'):
+        r""" Solves the linear system of equations using the LU factorization.

-
-        :math:`\mathbf{
-        """
-        return spla.solve_triangular(self.u, spla.solve_triangular(self.l, self.p.T@rhs, lower=True))
+        :math:`\mathbf{A} \mathbf{x} = \mathbf{b}` by forward and backward
+        substitution of :math:`\mathbf{x} = \mathbf{U}^{-1}\mathbf{L}^{-1}\mathbf{b}`.

-
-
-
+        ======= ================= =========================
+        `trans` Equation          Solution of :math:`x`
+        ------- ----------------- -------------------------
+        `N`     :math:`A x = b`   :math:`x = U^{-1} L^{-1}`
+        `T`     :math:`A^T x = b` :math:`x = L^{-1} U^{-1}`
+        `H`     :math:`A^H x = b` :math:`x = L^{-*} U^{-*}`
+        ======= ================= =========================

         The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
         :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
         """
-
-
+        if trans == 'N':
+            # A = P L U -> x = U^-1 L^-1 P^T b
+            return spla.solve_triangular(self.u, spla.solve_triangular(self.l, self.p.T@rhs, lower=True))
+        elif trans == 'T':
+            return self.p @ spla.solve_triangular(self.l, spla.solve_triangular(self.u, rhs, trans='T'),
+                                                  lower=True, trans='T')
+        elif trans == 'H':
+            return self.p @ spla.solve_triangular(self.l, spla.solve_triangular(self.u, rhs, trans='C'),
+                                                  lower=True, trans='C')
+        else:
+            raise TypeError("Only N, T, and H transposition is possible")


 # Dense Cholesky solver
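Here `scipy.linalg.lu` returns the permutation as a matrix, so :math:`A = P L U` and the transposed system factors as :math:`A^T = U^T L^T P^T` with :math:`(P^T)^{-1} = P`. A standalone check of the expression used in the 'T' branch, again using only numpy and scipy:

```python
import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(1)
A = rng.standard_normal((6, 6))
b = rng.standard_normal(6)

p, l, u = spla.lu(A)                                   # A = P L U
x = p @ spla.solve_triangular(l, spla.solve_triangular(u, b, trans='T'),
                              lower=True, trans='T')   # x = P L^{-T} U^{-T} b
assert np.allclose(A.T @ x, b)                         # indeed solves A^T x = b
```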
@@ -106,7 +118,7 @@ class SolverDenseCholesky(LinearSolver):
         upper triangular matrix.
         """
         try:
-            self.
+            self.U = spla.cholesky(A)
             self.success = True
         except np.linalg.LinAlgError as err:
             warnings.warn(f"{type(self).__name__}: {err} -- using {type(self.backup_solver).__name__} instead")
@@ -114,7 +126,7 @@ class SolverDenseCholesky(LinearSolver):
             self.success = False
         return self

-    def solve(self, rhs):
+    def solve(self, rhs, x0=None, trans='N'):
         r""" Solves the linear system of equations :math:`\mathbf{A} \mathbf{x} = \mathbf{b}` by forward and backward
         substitution of :math:`\mathbf{x} = \mathbf{U}^{-1}\mathbf{U}^{-\text{H}}\mathbf{b}`.

@@ -124,21 +136,16 @@ class SolverDenseCholesky(LinearSolver):
         # TODO When Cholesky factorization A = U^T U is used, symmetric complex matrices can also be solved, but this is
         # not implemented in scipy
         if self.success:
-
-
-
-
-
-
-
-
-        The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
-        :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
-        """
-        if self.success:
-            return self.solve(rhs)
+            if trans == 'N' or trans == 'H':
+                # A = U^H U -> A^-1 = U^-1 U^-H
+                return spla.solve_triangular(self.U, spla.solve_triangular(self.U, rhs, trans='C'))
+            elif trans == 'T':
+                # A^T = U^T conj(U) -> A^-T = conj(U^-1) U^-T
+                return spla.solve_triangular(self.U, spla.solve_triangular(self.U, rhs, trans='T').conj()).conj()
+            else:
+                raise TypeError("Only N, T, and H transposition is possible")
         else:
-            return self.backup_solver.
+            return self.backup_solver.solve(rhs, trans=trans)


 # Dense LDL solver
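The chained conjugations in the 'T' branch use that a Hermitian matrix satisfies :math:`A^T = A^*`, so :math:`x = (A^{-1} b^*)^*` with :math:`A^{-1} = U^{-1} U^{-H}` for the factorization :math:`A = U^H U`. A standalone check of that expression (illustration only, not package code):

```python
import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(2)
B = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
A = B.conj().T @ B + 4 * np.eye(4)   # Hermitian positive definite
b = rng.standard_normal(4) + 1j * rng.standard_normal(4)

U = spla.cholesky(A)                 # upper triangular factor, A = U^H U
x = spla.solve_triangular(U, spla.solve_triangular(U, b, trans='T').conj()).conj()
assert np.allclose(A.T @ x, b)       # indeed solves A^T x = b
```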
@@ -171,23 +178,13 @@ class SolverDenseLDL(LinearSolver):
         self.lp = self.l[self.p, :]
         return self

-    def solve(self, rhs):
+    def solve(self, rhs, x0=None, trans='N'):
         r""" Solves the linear system of equations :math:`\mathbf{A} \mathbf{x} = \mathbf{b}` by forward and backward
         substitution of :math:`\mathbf{x} = \mathbf{L}^{-\text{H}}\mathbf{D}^{-1}\mathbf{L}^{-1}\mathbf{b}` in the
         Hermitian case or as :math:`\mathbf{x} = \mathbf{L}^{-\text{T}}\mathbf{D}^{-1}\mathbf{L}^{-1}\mathbf{b}` in the
         symmetric case.

-        The
-        :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
-        """
-        u1 = spla.solve_triangular(self.lp, rhs[self.p], lower=True, unit_diagonal=True)
-        u2 = self.dinv(u1)
-        u = np.zeros_like(rhs, dtype=u2.dtype)
-        u[self.p] = spla.solve_triangular(self.lp, u2, trans='C' if self.hermitian else 'T', lower=True, unit_diagonal=True)
-        return u
-
-    def adjoint(self, rhs):
-        r""" Solves the linear system of equations :math:`\mathbf{A}^\text{H} \mathbf{x} = \mathbf{b}` by forward and
+        The adjoint system of equations :math:`\mathbf{A}^\text{H} \mathbf{x} = \mathbf{b}` is solved by forward and
         backward substitution of
         :math:`\mathbf{x} = \mathbf{L}^{-\text{H}}\mathbf{D}^{-\text{H}}\mathbf{L}^{-1}\mathbf{b}` in the Hermitian
         case or as :math:`\mathbf{x} = \mathbf{L}^{-\text{H}}\mathbf{D}^{-\text{H}}\mathbf{L}^{-*}\mathbf{b}`
@@ -196,11 +193,34 @@ class SolverDenseLDL(LinearSolver):
         The right-hand-side :math:`\mathbf{b}` can be of size ``(N)`` or ``(N, K)``, where ``N`` is the size of matrix
         :math:`\mathbf{A}` and ``K`` is the number of right-hand sides.
         """
-        if
-
-
+        if trans == 'N':
+            # Hermitian matrix A: A = L D L^H -> inv(A) = inv(L^H) inv(D) inv(L)
+            # Symmetric matrix A: A = L D L^T -> inv(A) = inv(L^T) inv(D) inv(L)
             u1 = spla.solve_triangular(self.lp, rhs[self.p], lower=True, unit_diagonal=True)
-
-
-
+            u2 = self.dinv(u1)
+            u = np.zeros_like(rhs, dtype=u2.dtype)
+            u[self.p] = spla.solve_triangular(self.lp, u2, trans='C' if self.hermitian else 'T', lower=True, unit_diagonal=True)
+        elif trans == 'T':
+            # Hermitian matrix A^T: A = conj(L) D^T L^T -> inv(A) = inv(L^T) inv(D^T) inv(L^*)
+            # Symmetric matrix A^T: A = L D^T L^T -> inv(A) = inv(L^T) inv(D^T) inv(L)
+            if self.hermitian:
+                u1 = spla.solve_triangular(self.lp, rhs[self.p].conj(), lower=True, unit_diagonal=True).conj()
+            else:
+                u1 = spla.solve_triangular(self.lp, rhs[self.p], lower=True, unit_diagonal=True)
+
+            u2 = self.dinvH(u1.conj()).conj()
+            u = np.zeros_like(rhs, dtype=u2.dtype)
+            u[self.p] = spla.solve_triangular(self.lp, u2, trans='T', lower=True, unit_diagonal=True)
+        elif trans == 'H':
+            # Hermitian matrix A: inv(A^H) = inv(L^H) inv(D^H) inv(L)
+            # Symmetric matrix A: inv(A^H) = inv(L^H) inv(D^H) inv(L^*)
+            if not self.hermitian:
+                u1 = spla.solve_triangular(self.lp, rhs[self.p].conj(), lower=True, unit_diagonal=True).conj()
+            else:
+                u1 = spla.solve_triangular(self.lp, rhs[self.p], lower=True, unit_diagonal=True)
+            u2 = self.dinvH(u1)
+            u = np.zeros_like(rhs, dtype=u2.dtype)
+            u[self.p] = spla.solve_triangular(self.lp, u2, trans='C', lower=True, unit_diagonal=True)
+        else:
+            raise TypeError("Only N, T, and H transposition is possible")
         return u
pymoto/solvers/iterative.py (new file)
@@ -0,0 +1,361 @@
+import warnings
+import time
+import numpy as np
+import scipy.sparse as sps
+from scipy.sparse.linalg import splu, spilu
+from .solvers import LinearSolver
+from .auto_determine import auto_determine_solver
+from pymoto import DomainDefinition
+
+
+class Preconditioner(LinearSolver):
+    """ Abstract base class for preconditioners to inexact solvers """
+    def update(self, A):
+        pass
+
+    def solve(self, rhs, x0=None, trans='N'):
+        return rhs.copy()
+
+
+class DampedJacobi(Preconditioner):
+    r""" Damped Jacobi preconditioner
+    :math:`M = \frac{1}{\omega} D`
+    Args:
+        A (optional): The matrix
+        w (optional): Weight factor :math:`0 < \omega \leq 1`
+    """
+    def __init__(self, A=None, w=1.0):
+        assert 0 < w <= 1, 'w must be between 0 and 1'
+        self.w = w
+        self.D = None
+        super().__init__(A)
+
+    def update(self, A):
+        self.D = A.diagonal()
+
+    def solve(self, rhs, x0=None, trans='N'):
+        if trans == 'N' or trans == 'T':
+            return self.w * (rhs.T/self.D).T
+        elif trans == 'H':
+            return self.w * (rhs.T/self.D.conj()).T
+        else:
+            raise TypeError("Only N, T, or H transposition is possible")
+
+
+class SOR(Preconditioner):
+    r""" Successive over-relaxation preconditioner
+    The matrix :math:`A = L + D + U` is split into a lower triangular, diagonal, and upper triangular part.
+    :math:`M = \left(\frac{D}{\omega} + L\right) \frac{\omega D^{-1}}{2-\omega} \left(\frac{D}{\omega} + U\right)`
+
+    Args:
+        A (optional): The matrix
+        w (optional): Weight factor :math:`0 < \omega < 2`
+    """
+    def __init__(self, A=None, w=1.0):
+        assert 0 < w < 2, 'w must be between 0 and 2'
+        self.w = w
+        self.L = None
+        self.U = None
+        self.Dw = None
+        super().__init__(A)
+
+    def update(self, A):
+        diag = A.diagonal()
+        diagw = sps.diags(diag)/self.w
+        self.L = splu(sps.tril(A, k=-1) + diagw)  # Lower triangular part including diagonal
+        self.U = splu(sps.triu(A, k=1) + diagw)
+
+        self.Dw = diag * (2 - self.w) / self.w
+
+    def solve(self, rhs, x0=None, trans='N'):
+        if trans == 'N':
+            # M = (D/w + L) wD^-1 / (2-w) (D/w + U)
+            # from scipy.sparse.linalg import spsolve_triangular
+            # u1 = spsolve_triangular(self.L, rhs, lower=True, overwrite_A=False)  # Solve triangular is still very slow :(
+            u1 = self.L.solve(rhs)
+            u1 *= self.Dw[:, None]
+            # u2 = spsolve_triangular(self.U, u1, lower=False, overwrite_A=False, overwrite_b=True)
+            u2 = self.U.solve(u1)
+            return u2
+        elif trans == 'T':
+            u1 = self.U.solve(rhs, trans='T')
+            u1 *= self.Dw[:, None]
+            u2 = self.L.solve(u1, trans='T')
+            return u2
+        elif trans == 'H':
+            u1 = self.U.solve(rhs, trans='H')
+            u1 *= self.Dw[:, None].conj()
+            u2 = self.L.solve(u1, trans='H')
+            return u2
+        else:
+            raise TypeError("Only N, T, or H transposition is possible")
+
+
+class ILU(Preconditioner):
+    """ Incomplete LU factorization
+
+    Args:
+        A (optional): The matrix
+        **kwargs (optional): Keyword arguments passed to `scipy.sparse.linalg.spilu`
+    """
+    def __init__(self, A=None, **kwargs):
+        self.kwargs = kwargs
+        self.ilu = None
+        super().__init__(A)
+
+    def update(self, A):
+        self.ilu = spilu(A, **self.kwargs)
+
+    def solve(self, rhs, x0=None, trans='N'):
+        return self.ilu.solve(rhs, trans=trans)
+
+
+class GeometricMultigrid(Preconditioner):
+    """ Geometric multigrid preconditioner
+
+    Args:
+        domain: The `DomainDefinition` used for the geometry
+        A (optional): The matrix
+        inner_level (optional): Inner solver for the coarse grid, for instance, a direct solver or another MG level.
+          The default is an automatically determined direct solver.
+        smoother (optional): Smoother to use to smooth the residual and solution before and after coarse level.
+          The default is `DampedJacobi(w=0.5)`.
+        smooth_steps (optional): Number of smoothing steps to execute
+    """
+    _available_cycles = ['v', 'w']
+
+    def __init__(self, domain: DomainDefinition, A=None, cycle='V', inner_level=None, smoother=None, smooth_steps=5):
+        assert domain.nelx % 2 == 0 and domain.nely % 2 == 0 and domain.nelz % 2 == 0, \
+            f"Domain sizes {domain.nelx, domain.nely, domain.nelz} must be divisible by 2"
+        self.domain = domain
+        self.A = A
+        assert cycle.lower() in self._available_cycles, f"Cycle ({cycle}) is not available. Options are {self._available_cycles}"
+        self.cycle = cycle
+        self.inner_level = None if inner_level is None else inner_level
+        self.smoother = DampedJacobi(w=0.5) if smoother is None else None
+        self.smooth_steps = smooth_steps
+        self.R = None
+        self.sub_domain = DomainDefinition(domain.nelx // 2, domain.nely // 2, domain.nelz // 2,
+                                           domain.unitx * 2, domain.unity * 2, domain.unitz * 2)
+
+        super().__init__(A)
+
+    def update(self, A):
+        if self.R is None:
+            self.setup_interpolation(A)
+        self.A = A
+        self.smoother.update(A)
+        Ac = self.R.T @ A @ self.R
+        if self.inner_level is None:
+            self.inner_level = auto_determine_solver(Ac)
+        self.inner_level.update(Ac)
+
+    def setup_interpolation(self, A):
+        assert A.shape[0] % self.domain.nnodes == 0
+        ndof = int(A.shape[0] / self.domain.nnodes)  # Number of dofs per node
+
+        w = np.ones((3, 3, 3))*0.125
+        w[1, :, :] = 0.25
+        w[:, 1, :] = 0.25
+        w[:, :, 1] = 0.25
+        w[1, 1, :] = 0.5
+        w[1, :, 1] = 0.5
+        w[:, 1, 1] = 0.5
+        w[1, 1, 1] = 1.0
+
+        rows = []
+        cols = []
+        vals = []
+        for i in [-1, 0, 1]:
+            imin, imax = max(-i, 0), min(self.sub_domain.nelx + 1 - i, self.sub_domain.nelx + 1)
+            ix = np.arange(imin, imax)
+            for j in [-1, 0, 1]:
+                jmin, jmax = max(-j, 0), min(self.sub_domain.nely + 1 - j, self.sub_domain.nely + 1)
+                iy = np.arange(jmin, jmax)
+                for k in ([-1, 0, 1] if self.domain.dim == 3 else [0]):
+                    # Coarse node cartesian indices
+                    kmin, kmax = max(-k, 0), min(self.sub_domain.nelz + 1 - k, self.sub_domain.nelz + 1)
+                    iz = np.arange(kmin, kmax)
+                    # Coarse node numbers
+                    nod_c = self.sub_domain.get_nodenumber(*np.meshgrid(ix, iy, iz, indexing='ij')).flatten()
+                    # Fine node numbers with offset
+                    ixc, iyc, izc = ix * 2 + i, iy * 2 + j, iz * 2 + k
+                    nod_f = self.domain.get_nodenumber(*np.meshgrid(ixc, iyc, izc, indexing='ij')).flatten()
+                    for d in range(ndof):
+                        rows.append(nod_f * ndof + d)
+                        cols.append(nod_c * ndof + d)
+                        vals.append(np.ones_like(rows[-1], dtype=w.dtype) * w[1+i, 1+j, 1+k])
+
+        rows = np.concatenate(rows)
+        cols = np.concatenate(cols)
+        vals = np.concatenate(vals)
+        nfine = ndof * self.domain.nnodes
+        ncoarse = ndof * self.sub_domain.nnodes
+        self.R = sps.coo_matrix((vals, (rows, cols)), shape=(nfine, ncoarse))
+        self.R = type(A)(self.R)  # Convert to correct matrix type
+
+    def solve(self, rhs, x0=None, trans='N'):
+        if trans == 'N':
+            A = self.A
+        elif trans == 'T':
+            A = self.A.T
+        elif trans == 'H':
+            A = self.A.conj().T
+        else:
+            raise TypeError("Only N, T, or H transposition is possible")
+
+        # Pre-smoothing
+        if x0 is None:
+            u_f = self.smoother.solve(rhs, trans=trans)
+        else:
+            r = rhs - self.A @ x0
+            u_f = x0 + self.smoother.solve(r, trans=trans)
+        for i in range(self.smooth_steps-1):
+            r = rhs - self.A @ u_f
+            u_f += self.smoother.solve(r, trans=trans)
+
+        r = rhs - A @ u_f
+        # Restrict residual to coarse level
+        r_c = self.R.T @ r
+
+        # Solve at coarse level
+        u_c = self.inner_level.solve(r_c)
+
+        # Interpolate and correct
+        u_f += self.R @ u_c
+
+        # Post-smoothing
+        for i in range(self.smooth_steps):
+            r = rhs - self.A @ u_f
+            u_f += self.smoother.solve(r, trans=trans)
+        return u_f
+
+
+def orth(u, normalize=True, zero_rtol=1e-15):
+    """ Create orthogonal basis from a set of vectors
+
+    Args:
+        u: Set of vectors of size (#dof, #vectors)
+        normalize: Also normalize the basis vectors
+        zero_rtol: Relative tolerance for detection of zero vectors (in case of a rank-deficient basis)
+
+    Returns:
+        v: Orthogonal basis vectors (#dof, #non-zero-vectors)
+    """
+    if u.ndim == 1:
+        return u
+    elif u.ndim > 2:
+        raise TypeError("Only valid for 1D or 2D matrix")
+
+    def dot(a, b):  # Define inner product
+        return a @ b.conj()
+
+    orth_vecs = []
+    for i in range(u.shape[-1]):
+        vi = np.copy(u[..., i])
+        beta_i = dot(vi, vi)
+        for vj in orth_vecs:
+            alpha_ij = dot(vi, vj)
+            alpha_jj = 1.0 if normalize else dot(vj, vj)
+            vi -= vj * alpha_ij / alpha_jj
+        beta_i_new = dot(vi, vi)
+        if beta_i_new / beta_i < zero_rtol:  # Detect zero vector
+            continue
+        if normalize:
+            vi /= np.sqrt(beta_i_new)
+        orth_vecs.append(vi)
+    return np.stack(orth_vecs, axis=-1)
+
+
+class CG(LinearSolver):
+    """ Preconditioned conjugate gradient method
+    Works for positive-definite self-adjoint matrices (:math:`A=A^H`)
+
+    References:
+        Ji & Li (2017), A breakdown-free BCG method. DOI 10.1007/s10543-016-0631-z
+        https://www.cs.odu.edu/~yaohang/portfolio/BIT2017.pdf
+        Shewchuck (1994), Introduction to CG method without the agonzing pain.
+        https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf
+
+    Args:
+        A: The matrix
+        preconditioner: Preconditioner to use
+        tol: Convergence tolerance
+        maxit: Maximum number of iterations
+        restart: Restart every Nth iteration
+        verbosity: Log level
+    """
+    def __init__(self, A=None, preconditioner=Preconditioner(), tol=1e-7, maxit=10000, restart=50, verbosity=0):
+        self.preconditioner = preconditioner
+        self.A = A
+        self.tol = tol
+        self.maxit = maxit
+        self.restart = restart
+        self.verbosity = verbosity
+        super().__init__(A)
+
+    def update(self, A):
+        tstart = time.perf_counter()
+        self.A = A
+        self.preconditioner.update(A)
+        if self.verbosity >= 1:
+            print(f"Preconditioner set up in {np.round(time.perf_counter() - tstart,3)}s")
+
+    def solve(self, rhs, x0=None, trans='N'):
+        if trans == 'N':
+            A = self.A
+        elif trans == 'T':
+            A = self.A.T
+        elif trans == 'H':
+            A = self.A.conj().T
+        else:
+            raise TypeError("Only N, T, or H transposition is possible")
+
+        tstart = time.perf_counter()
+        if rhs.ndim == 1:
+            b = rhs.reshape((rhs.size, 1))
+        else:
+            b = rhs
+        x = np.zeros_like(rhs, dtype=np.result_type(rhs, A)) if x0 is None else x0.copy()
+        if x.ndim == 1:
+            x = x.reshape((x.size, 1))
+
+        r = b - A@x
+        z = self.preconditioner.solve(r, trans=trans)
+        p = orth(z, normalize=True)
+        if self.verbosity >= 2:
+            print(f"Initial residual = {np.linalg.norm(r, axis=0) / np.linalg.norm(b, axis=0)}")
+
+        for i in range(self.maxit):
+            q = A @ p
+            pq = p.conj().T @ q
+            pq_inv = np.linalg.inv(pq)
+            alpha = pq_inv @ (p.conj().T @ r)
+
+            x += p @ alpha
+            if i % 50 == 0:  # Explicit restart
+                r = b - A@x
+            else:
+                r -= q @ alpha
+
+            if self.verbosity >= 2:
+                print(f"i = {i}, residuals = {np.linalg.norm(r, axis=0) / np.linalg.norm(b, axis=0)}")
+
+            tval = np.linalg.norm(r)/np.linalg.norm(b)
+            if tval <= self.tol:
+                break
+
+            z = self.preconditioner.solve(r, trans=trans)
+
+            beta = -pq_inv @ (q.conj().T @ z)
+            p = orth(z + p@beta, normalize=False)
+
+        if tval > self.tol:
+            warnings.warn(f'Maximum iterations ({self.maxit}) reached, with final residual {tval}')
+        elif self.verbosity >= 1:
+            print(f"Converged in {i} iterations and {np.round(time.perf_counter() - tstart, 3)}s, with final residuals {np.linalg.norm(r, axis=0) / np.linalg.norm(b, axis=0)}")
+
+        if rhs.ndim == 1:
+            return x.flatten()
+        else:
+            return x
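The new module adds a preconditioned conjugate-gradient solver and several preconditioners (damped Jacobi, SOR, ILU, geometric multigrid) that share the same `solve(rhs, x0=None, trans=...)` interface as the dense solvers. A minimal usage sketch with a small symmetric positive-definite test system; the import path is assumed from the new file location, and whether these classes are also re-exported at package level is not visible in this diff:

```python
import numpy as np
import scipy.sparse as sps
from pymoto.solvers.iterative import CG, DampedJacobi  # path assumed from pymoto/solvers/iterative.py

# Small SPD test system: 1D Laplacian
n = 100
A = sps.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csc')
b = np.ones(n)

solver = CG(preconditioner=DampedJacobi(w=0.8), tol=1e-8)
solver.update(A)                       # prepares the preconditioner for this matrix
x = solver.solve(b, x0=np.zeros(n))    # starting guess passed explicitly
print(np.linalg.norm(A @ x - b) / np.linalg.norm(b))   # relative residual, expected below the solver tolerance
```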
pymoto/solvers/matrix_checks.py (new file)
@@ -0,0 +1,56 @@
+import numpy as np
+import scipy.sparse as sps
+try:
+    import cvxopt
+    _has_cvxopt = True
+except ImportError:
+    _has_cvxopt = False
+
+
+def is_cvxopt_spmatrix(A):
+    """ Checks if the argument is a cvxopt sparse matrix """
+    return isinstance(A, cvxopt.spmatrix) if _has_cvxopt else False
+
+
+def matrix_is_complex(A):
+    """ Checks if the matrix is complex """
+    if is_cvxopt_spmatrix(A):
+        return A.typecode == 'z'
+    else:
+        return np.iscomplexobj(A)
+
+
+def matrix_is_diagonal(A):
+    """ Checks if the matrix is diagonal"""
+    if sps.issparse(A):
+        if isinstance(A, sps.dia_matrix):
+            return len(A.offsets) == 1 and A.offsets[0] == 0
+        else:
+            return np.allclose((A - sps.spdiags(A.diagonal(), 0, *A.shape)).data, 0.0)
+    elif is_cvxopt_spmatrix(A):
+        return max(abs(A.I - A.J)) == 0
+    else:
+        return np.allclose(A, np.diag(np.diag(A)))
+
+
+def matrix_is_symmetric(A):
+    """ Checks whether a matrix is numerically symmetric """
+    if sps.issparse(A):
+        return np.allclose((A-A.T).data, 0)
+    elif is_cvxopt_spmatrix(A):
+        return np.isclose(max(abs(A-A.T)), 0.0)
+    else:
+        return np.allclose(A, A.T)
+
+
+def matrix_is_hermitian(A):
+    """ Checks whether a matrix is numerically Hermitian """
+    if matrix_is_complex(A):
+        if sps.issparse(A):
+            return np.allclose((A-A.T.conj()).data, 0)
+        elif is_cvxopt_spmatrix(A):
+            return np.isclose(max(abs(A-A.ctrans())), 0.0)
+        else:
+            return np.allclose(A, A.T.conj())
+    else:
+        return matrix_is_symmetric(A)