pygeoinf 1.2.0__py3-none-any.whl → 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygeoinf/__init__.py +17 -4
- pygeoinf/backus_gilbert.py +120 -0
- pygeoinf/direct_sum.py +35 -77
- pygeoinf/forward_problem.py +39 -15
- pygeoinf/gaussian_measure.py +35 -10
- pygeoinf/hilbert_space.py +55 -9
- pygeoinf/inversion.py +101 -9
- pygeoinf/linear_bayesian.py +3 -3
- pygeoinf/linear_forms.py +137 -43
- pygeoinf/{operators.py → linear_operators.py} +279 -303
- pygeoinf/linear_optimisation.py +9 -9
- pygeoinf/linear_solvers.py +74 -7
- pygeoinf/nonlinear_forms.py +226 -0
- pygeoinf/nonlinear_operators.py +216 -0
- pygeoinf/nonlinear_optimisation.py +211 -0
- pygeoinf/parallel.py +1 -1
- pygeoinf/random_matrix.py +212 -72
- pygeoinf/symmetric_space/circle.py +12 -2
- pygeoinf/symmetric_space/sphere.py +15 -1
- pygeoinf/symmetric_space/symmetric_space.py +1 -1
- {pygeoinf-1.2.0.dist-info → pygeoinf-1.2.2.dist-info}/METADATA +1 -1
- pygeoinf-1.2.2.dist-info/RECORD +25 -0
- pygeoinf-1.2.0.dist-info/RECORD +0 -21
- {pygeoinf-1.2.0.dist-info → pygeoinf-1.2.2.dist-info}/LICENSE +0 -0
- {pygeoinf-1.2.0.dist-info → pygeoinf-1.2.2.dist-info}/WHEEL +0 -0
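
The rename of pygeoinf/operators.py to pygeoinf/linear_operators.py means downstream code importing from the old module path needs updating. A minimal before/after sketch (whether the affected classes are also re-exported from the package root is not shown in this diff):

# pygeoinf 1.2.0 (old module path):
# from pygeoinf.operators import LinearOperator

# pygeoinf 1.2.2 (new module path):
from pygeoinf.linear_operators import LinearOperator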
pygeoinf/linear_optimisation.py
CHANGED
@@ -21,17 +21,17 @@ Key Classes
 from __future__ import annotations
 from typing import Optional, Union
 
-from .
-from .inversion import
+from .nonlinear_operators import NonLinearOperator
+from .inversion import LinearInversion
 
 
 from .forward_problem import LinearForwardProblem
-from .
+from .linear_operators import LinearOperator
 from .linear_solvers import LinearSolver, IterativeLinearSolver
 from .hilbert_space import Vector
 
 
-class LinearLeastSquaresInversion(
+class LinearLeastSquaresInversion(LinearInversion):
     """
     Solves a linear inverse problem using Tikhonov-regularized least-squares.
 
@@ -89,7 +89,7 @@ class LinearLeastSquaresInversion(Inversion):
         /,
         *,
         preconditioner: Optional[LinearOperator] = None,
-    ) -> Union[
+    ) -> Union[NonLinearOperator, LinearOperator]:
         """
         Returns an operator that maps data to the least-squares solution.
 
@@ -131,13 +131,13 @@ class LinearLeastSquaresInversion(Inversion):
                     @ inverse_data_covariance
                )(shifted_data)
 
-            return
+            return NonLinearOperator(self.data_space, self.model_space, mapping)
 
         else:
             return inverse_normal_operator @ forward_operator.adjoint
 
 
-class LinearMinimumNormInversion(
+class LinearMinimumNormInversion(LinearInversion):
     """
     Finds a regularized solution using the discrepancy principle.
 
@@ -168,7 +168,7 @@ class LinearMinimumNormInversion(Inversion):
         maxiter: int = 100,
         rtol: float = 1.0e-6,
         atol: float = 0.0,
-    ) -> Union[
+    ) -> Union[NonLinearOperator, LinearOperator]:
         """
         Returns an operator that maps data to the minimum-norm solution.
 
@@ -262,7 +262,7 @@ class LinearMinimumNormInversion(Inversion):
 
                 raise RuntimeError("Bracketing search failed to converge.")
 
-            return
+            return NonLinearOperator(self.data_space, self.model_space, mapping)
 
         else:
             # For error-free data, compute the minimum-norm solution via A*(A*A)^-1
pygeoinf/linear_solvers.py
CHANGED
@@ -24,15 +24,10 @@ from typing import Callable, Optional, Dict, Any
 
 import numpy as np
 from scipy.sparse.linalg import LinearOperator as ScipyLinOp
-from scipy.linalg import (
-    cho_factor,
-    cho_solve,
-    lu_factor,
-    lu_solve,
-)
+from scipy.linalg import cho_factor, cho_solve, lu_factor, lu_solve, eigh
 from scipy.sparse.linalg import gmres, bicgstab, cg, bicg
 
-from .
+from .linear_operators import LinearOperator
 from .hilbert_space import Vector
 
 
@@ -167,6 +162,78 @@ class CholeskySolver(DirectLinearSolver):
         )
 
 
+class EigenSolver(DirectLinearSolver):
+    """
+    A direct linear solver based on the eigendecomposition of a symmetric operator.
+
+    This solver is robust for symmetric operators that may be singular or
+    numerically ill-conditioned. In such cases, it computes a pseudo-inverse by
+    regularizing the eigenvalues, treating those close to zero (relative to the largest
+    eigenvalue) as exactly zero.
+    """
+
+    def __init__(
+        self,
+        /,
+        *,
+        galerkin: bool = False,
+        parallel: bool = False,
+        n_jobs: int = -1,
+        rtol: float = 1e-12,
+    ) -> None:
+        """
+        Args:
+            galerkin (bool): If True, the Galerkin matrix representation is used.
+            parallel (bool): If True, parallel computation is used.
+            n_jobs (int): Number of parallel jobs.
+            rtol (float): Relative tolerance for treating eigenvalues as zero.
+                An eigenvalue `s` is treated as zero if
+                `abs(s) < rtol * max(abs(eigenvalues))`.
+        """
+        super().__init__(galerkin=galerkin, parallel=parallel, n_jobs=n_jobs)
+        self._rtol = rtol
+
+    def __call__(self, operator: LinearOperator) -> LinearOperator:
+        """
+        Computes the pseudo-inverse of a self-adjoint LinearOperator.
+        """
+        assert operator.is_automorphism
+
+        matrix = operator.matrix(
+            dense=True,
+            galerkin=self._galerkin,
+            parallel=self._parallel,
+            n_jobs=self._n_jobs,
+        )
+
+        eigenvalues, eigenvectors = eigh(matrix)
+
+        max_abs_eigenvalue = np.max(np.abs(eigenvalues))
+        if max_abs_eigenvalue > 0:
+            threshold = self._rtol * max_abs_eigenvalue
+        else:
+            threshold = 0
+
+        inv_eigenvalues = np.where(
+            np.abs(eigenvalues) > threshold,
+            np.reciprocal(eigenvalues),
+            0.0,
+        )
+
+        def matvec(cy: np.ndarray) -> np.ndarray:
+            z = eigenvectors.T @ cy
+            w = inv_eigenvalues * z
+            return eigenvectors @ w
+
+        inverse_matrix = ScipyLinOp(
+            (operator.domain.dim, operator.codomain.dim), matvec=matvec, rmatvec=matvec
+        )
+
+        return LinearOperator.from_matrix(
+            operator.domain, operator.domain, inverse_matrix, galerkin=self._galerkin
+        )
+
+
 class IterativeLinearSolver(LinearSolver):
     """
     An abstract base class for iterative linear solvers.
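
The new EigenSolver builds a pseudo-inverse by eigendecomposing the operator's dense matrix and zeroing the reciprocals of eigenvalues whose magnitude falls below rtol times the largest eigenvalue magnitude. A self-contained NumPy/SciPy sketch of that strategy on a singular symmetric matrix (it mirrors the logic above but is not pygeoinf code):

import numpy as np
from scipy.linalg import eigh

A = np.array([[2.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0]])   # symmetric and singular

rtol = 1e-12
eigenvalues, eigenvectors = eigh(A)
threshold = rtol * np.max(np.abs(eigenvalues))

# Regularized reciprocals: eigenvalues below the threshold contribute nothing.
# (The solver above uses np.where with np.reciprocal; np.divide's where= mask
# just avoids a divide-by-zero warning in this sketch.)
inv_eigenvalues = np.divide(
    1.0, eigenvalues,
    out=np.zeros_like(eigenvalues),
    where=np.abs(eigenvalues) > threshold,
)

def apply_pseudo_inverse(y):
    # project onto the eigenbasis, scale, and map back
    return eigenvectors @ (inv_eigenvalues * (eigenvectors.T @ y))

y = np.array([4.0, 3.0, 5.0])
x = apply_pseudo_inverse(y)
print(x)        # [2. 3. 0.]  -- the null-space component of y is simply dropped
print(A @ x)    # [4. 3. 0.]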
pygeoinf/nonlinear_forms.py
ADDED
@@ -0,0 +1,226 @@
"""
Provides the `NonLinearForm` base class to represent non-linear functionals.

A non-linear form, or functional, is a mapping from a vector in a Hilbert
space to a scalar. This class provides a foundational structure for these
functionals, equipping them with algebraic operations and an interface for
derivatives like gradients and Hessians.
"""

from __future__ import annotations
from typing import Callable, Optional, Any, TYPE_CHECKING


# This block only runs for type checkers, not at runtime
if TYPE_CHECKING:
    from .hilbert_space import HilbertSpace, Vector
    from .linear_forms import LinearForm
    from .linear_operators import LinearOperator
    from .nonlinear_operators import NonLinearOperator


class NonLinearForm:
    """
    Represents a general non-linear functional that maps vectors to scalars.

    This class serves as the foundation for all forms. It defines the basic
    callable interface `form(x)` and overloads arithmetic operators (`+`, `-`, `*`)
    to create new forms. It also provides an optional framework for specifying
    a form's gradient and Hessian.
    """

    def __init__(
        self,
        domain: HilbertSpace,
        mapping: Callable[[Vector], float],
        /,
        *,
        gradient: Optional[Callable[[Vector], Vector]] = None,
        hessian: Optional[Callable[[Vector], LinearOperator]] = None,
    ) -> None:
        """
        Initializes the NonLinearForm.

        Args:
            domain: The Hilbert space on which the form is defined.
            mapping: The function `f(x)` that defines the action of the form.
            gradient: An optional function that computes the gradient of the form.
            hessian: An optional function that computes the Hessian of the form.
        """

        self._domain: HilbertSpace = domain
        self._mapping = mapping
        self._gradient = gradient
        self._hessian = hessian

    @property
    def domain(self) -> HilbertSpace:
        """The Hilbert space on which the form is defined."""
        return self._domain

    @property
    def has_gradient(self) -> bool:
        """True if the form has a gradient."""
        return self._gradient is not None

    @property
    def has_hessian(self) -> bool:
        """True if the form has a Hessian."""
        return self._hessian is not None

    def __call__(self, x: Any) -> float:
        """Applies the linear form to a vector."""
        return self._mapping(x)

    def gradient(self, x: Any) -> Vector:
        """
        Computes the gradient of the form at a given point.

        Args:
            x: The vector at which to evaluate the gradient.

        Returns:
            The gradient of the form as a vector in the domain space.

        Raises:
            NotImplementedError: If a gradient function was not provided
                during initialization.
        """
        if self._gradient is None:
            raise NotImplementedError("Gradient not implemented for this form.")
        return self._gradient(x)

    def derivative(self, x: Vector) -> LinearForm:
        """
        Computes the derivative of the form at a given point.

        Args:
            x: The vector at which to evaluate the derivative.

        Returns:
            The derivative of the form as a `LinearForm`.

        Raises:
            NotImplementedError: If a gradient function was not provided
                during initialization.
        """
        return self.domain.to_dual(self.gradient(x))

    def hessian(self, x: Any) -> LinearOperator:
        """
        Computes the Hessian of the form at a given point.

        Args:
            x: The vector at which to evaluate the Hessian.

        Returns:
            The Hessian of the form as a LinearOperator mapping the domain to itself.

        Raises:
            NotImplementedError: If a Hessian function was not provided
                during initialization.
        """
        if self._hessian is None:
            raise NotImplementedError("Hessian not implemented for this form.")
        return self._hessian(x)

    def __neg__(self) -> NonLinearForm:
        """Returns the additive inverse of the form."""

        if self._gradient is None:
            gradient = None
        else:

            def gradient(x: Vector) -> Vector:
                return self.domain.negative(self.gradient(x))

        if self._hessian is None:
            hessian = None
        else:

            def hessian(x: Vector) -> LinearOperator:
                return -self.hessian(x)

        return NonLinearForm(
            self.domain, lambda x: -self(x), gradient=gradient, hessian=hessian
        )

    def __mul__(self, a: float) -> NonLinearForm:
        """Returns the product of the form and a scalar."""

        if self._gradient is None:
            gradient = None
        else:

            def gradient(x: Vector) -> Vector:
                return self.domain.multiply(a, self.gradient(x))

        if self._hessian is None:
            hessian = None
        else:

            def hessian(x: Vector) -> LinearOperator:
                return a * self.hessian(x)

        return NonLinearForm(
            self.domain,
            lambda x: a * self(x),
            gradient=gradient,
            hessian=hessian,
        )

    def __rmul__(self, a: float) -> NonLinearForm:
        """Returns the product of the form and a scalar."""
        return self * a

    def __truediv__(self, a: float) -> NonLinearForm:
        """Returns the division of the form by a scalar."""
        return self * (1.0 / a)

    def __add__(self, other: NonLinearForm) -> NonLinearForm:
        """Returns the sum of this form and another."""

        if self._gradient is None or other._gradient is None:
            gradient = None
        else:

            def gradient(x: Vector) -> Vector:
                return self.domain.add(self.gradient(x), other.gradient(x))

        if self._hessian is None or other._hessian is None:
            hessian = None
        else:

            def hessian(x: Vector) -> LinearOperator:
                return self.hessian(x) + other.hessian(x)

        return NonLinearForm(
            self.domain,
            lambda x: self(x) + other(x),
            gradient=gradient,
            hessian=hessian,
        )

    def __sub__(self, other: NonLinearForm) -> NonLinearForm:
        """Returns the difference between this form and another."""

        if self._gradient is None or other._gradient is None:
            gradient = None
        else:

            def gradient(x: Vector) -> Vector:
                return self.domain.subtract(self.gradient(x), other.gradient(x))

        if self._hessian is None or other._hessian is None:
            hessian = None
        else:

            def hessian(x: Vector) -> LinearOperator:
                return self.hessian(x) - other.hessian(x)

        return NonLinearForm(
            self.domain,
            lambda x: self(x) - other(x),
            gradient=gradient,
            hessian=hessian,
        )
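
A small usage sketch of the new NonLinearForm algebra. Only NonLinearForm and its constructor signature are taken from the file above; ToyDomain is a hypothetical stand-in supplying the few Hilbert-space methods the algebra calls (real code would pass one of pygeoinf's Hilbert space instances instead):

import numpy as np
from pygeoinf.nonlinear_forms import NonLinearForm

class ToyDomain:
    # hypothetical stand-in: just the methods used when forms are scaled and combined
    def add(self, x, y): return x + y
    def subtract(self, x, y): return x - y
    def multiply(self, a, x): return a * x
    def negative(self, x): return -x

domain = ToyDomain()

# f(x) = ||x||^2 with gradient 2x
f = NonLinearForm(domain, lambda x: float(x @ x), gradient=lambda x: 2.0 * x)

h = f + 0.5 * f          # algebra on forms; gradients combine automatically

x = np.array([1.0, 2.0, 3.0])
print(h(x))              # 21.0  (= 1.5 * ||x||^2)
print(h.has_gradient)    # True
print(h.gradient(x))     # [3. 6. 9.]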
pygeoinf/nonlinear_operators.py
ADDED
@@ -0,0 +1,216 @@
"""
Provides the `NonLinearOperator` base class for mappings between Hilbert spaces.

A non-linear operator is a general mapping `F(x)` from a vector `x` in a
domain Hilbert space to a vector `y` in a codomain Hilbert space. This class
provides a foundational structure for these mappings, equipping them with
algebraic operations and an interface for the Frécher derivative.
"""

from __future__ import annotations
from typing import Callable, Optional, Any, TYPE_CHECKING


# This block only runs for type checkers, not at runtime
if TYPE_CHECKING:
    from .hilbert_space import HilbertSpace, EuclideanSpace, Vector
    from .linear_operators import LinearOperator


class NonLinearOperator:
    """
    Represents a general non-linear operator that maps vectors to vectors.

    This class provides a functional representation for an operator `F(x)`,
    and includes an interface for its Fréchet derivative, F'(x), which is the
    linear operator that best approximates F at a given point x. It serves
    as the base class for the more specialized `LinearOperator`.
    """

    def __init__(
        self,
        domain: HilbertSpace,
        codomain: HilbertSpace,
        mapping: Callable[[Vector], Any],
        /,
        *,
        derivative: Callable[[Vector], LinearOperator] = None,
    ) -> None:
        """Initializes the NonLinearOperator.

        Args:
            domain: The Hilbert space from which the operator maps.
            codomain: The Hilbert space to which the operator maps.
            mapping: The function `F(x)` that defines the mapping.
            derivative: An optional function that takes a vector `x` and
                returns the Fréchet derivative (a `LinearOperator`) at
                that point.
        """
        self._domain: HilbertSpace = domain
        self._codomain: HilbertSpace = codomain
        self._mapping: Callable[[Any], Any] = mapping
        self._derivative: Callable[[Any], LinearOperator] = derivative

    @property
    def domain(self) -> HilbertSpace:
        """The domain of the operator."""
        return self._domain

    @property
    def codomain(self) -> HilbertSpace:
        """The codomain of the operator."""
        return self._codomain

    @property
    def is_automorphism(self) -> bool:
        """True if the operator maps a space into itself."""
        return self.domain == self.codomain

    @property
    def is_square(self) -> bool:
        """True if the operator's domain and codomain have the same dimension."""
        return self.domain.dim == self.codomain.dim

    @property
    def has_derivative(self) -> bool:
        """
        Returns true if the operators derivative is implemented.
        """
        return self._derivative is not None

    def __call__(self, x: Any) -> Any:
        """Applies the operator's mapping to a vector."""
        return self._mapping(x)

    def derivative(self, x: Vector) -> LinearOperator:
        """Computes the Fréchet derivative of the operator at a given point.

        The Fréchet derivative is the linear operator that best approximates
        the non-linear operator in the neighborhood of the point `x`.

        Args:
            x: The point at which to compute the derivative.

        Returns:
            The derivative as a `LinearOperator`.

        Raises:
            NotImplementedError: If a derivative function was not provided.
        """
        if self._derivative is None:
            raise NotImplementedError("Derivative not implemented")
        return self._derivative(x)

    def __neg__(self) -> NonLinearOperator:
        domain = self.domain
        codomain = self.codomain

        def mapping(x: Any) -> Any:
            return codomain.negative(self(x))

        if self._derivative is not None:

            def derivative(x: Vector) -> LinearOperator:
                return -self.derivative(x)

        else:
            derivative = None

        return NonLinearOperator(domain, codomain, mapping, derivative=derivative)

    def __mul__(self, a: float) -> NonLinearOperator:
        domain = self.domain
        codomain = self.codomain

        def mapping(x: Any) -> Any:
            return codomain.multiply(a, self(x))

        if self._derivative is not None:

            def derivative(x: Vector) -> LinearOperator:
                return a * self.derivative(x)

        else:
            derivative = None

        return NonLinearOperator(domain, codomain, mapping, derivative=derivative)

    def __rmul__(self, a: float) -> NonLinearOperator:
        return self * a

    def __truediv__(self, a: float) -> NonLinearOperator:
        return self * (1.0 / a)

    def __add__(self, other: NonLinearOperator) -> NonLinearOperator:

        if not isinstance(other, NonLinearOperator):
            raise TypeError("Operand must be a NonLinearOperator")

        domain = self.domain
        codomain = self.codomain

        def mapping(x: Any) -> Any:
            return codomain.add(self(x), other(x))

        if self._derivative is not None and other._derivative is not None:

            def derivative(x: Vector) -> LinearOperator:
                return self.derivative(x) + other.derivative(x)

        else:
            derivative = None

        return NonLinearOperator(domain, codomain, mapping, derivative=derivative)

    def __sub__(self, other: NonLinearOperator) -> NonLinearOperator:

        if not isinstance(other, NonLinearOperator):
            raise TypeError("Operand must be a NonLinearOperator")

        domain = self.domain
        codomain = self.codomain

        def mapping(x: Any) -> Any:
            return codomain.subtract(self(x), other(x))

        if self._derivative is not None and other._derivative is not None:

            def derivative(x: Vector) -> LinearOperator:
                return self.derivative(x) - other.derivative(x)

        else:
            derivative = None

        return NonLinearOperator(domain, codomain, mapping, derivative=derivative)

    def __matmul__(self, other: NonLinearOperator) -> NonLinearOperator:
        """Composes this operator with another: `(self @ other)(x) = self(other(x))`.

        The derivative of the composed operator is computed using the chain rule:
        `(F o G)'(x) = F'(G(x)) @ G'(x)`.

        Args:
            other: The operator to apply before this one.

        Returns:
            A new `NonLinearOperator` representing the composition.
        """

        if not isinstance(other, NonLinearOperator):
            raise TypeError("Operand must be a NonLinearOperator")

        domain = other.domain
        codomain = self.codomain

        def mapping(x: Any) -> Any:
            return self(other(x))

        if self._derivative is not None and other._derivative is not None:

            def derivative(x: Vector) -> LinearOperator:
                return self.derivative(other(x)) @ other.derivative(x)

        else:
            derivative = None

        return NonLinearOperator(domain, codomain, mapping, derivative=derivative)
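
And a matching sketch for NonLinearOperator composition and algebra. Only NonLinearOperator itself comes from the file above; ToySpace is a hypothetical stand-in for a Hilbert space, and no derivatives are supplied, so the composed operator's derivative stays unimplemented:

import numpy as np
from pygeoinf.nonlinear_operators import NonLinearOperator

class ToySpace:
    # hypothetical stand-in: the methods used by the operator algebra
    def add(self, x, y): return x + y
    def subtract(self, x, y): return x - y
    def multiply(self, a, x): return a * x
    def negative(self, x): return -x

space = ToySpace()

F = NonLinearOperator(space, space, lambda x: x**2)       # elementwise square
G = NonLinearOperator(space, space, lambda x: x + 1.0)    # shift by one

H = F @ G                 # composition: H(x) = F(G(x)) = (x + 1)^2
S = 2.0 * F - G           # pointwise algebra on operators

x = np.array([0.0, 1.0, 2.0])
print(H(x))               # [1. 4. 9.]
print(S(x))               # [-1.  0.  5.]
print(H.has_derivative)   # False: neither factor supplied a derivative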