pygeoinf 1.0.9__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygeoinf/__init__.py +5 -1
- pygeoinf/direct_sum.py +101 -75
- pygeoinf/forward_problem.py +33 -32
- pygeoinf/gaussian_measure.py +97 -71
- pygeoinf/hilbert_space.py +517 -241
- pygeoinf/inversion.py +16 -4
- pygeoinf/linear_bayesian.py +57 -36
- pygeoinf/linear_forms.py +169 -0
- pygeoinf/linear_optimisation.py +34 -23
- pygeoinf/linear_solvers.py +74 -247
- pygeoinf/operators.py +175 -36
- pygeoinf/random_matrix.py +36 -32
- pygeoinf/symmetric_space/circle.py +347 -202
- pygeoinf/symmetric_space/sphere.py +335 -448
- pygeoinf/symmetric_space/symmetric_space.py +330 -142
- {pygeoinf-1.0.9.dist-info → pygeoinf-1.1.1.dist-info}/METADATA +11 -9
- pygeoinf-1.1.1.dist-info/RECORD +20 -0
- pygeoinf/forms.py +0 -128
- pygeoinf/symmetric_space/line.py +0 -384
- pygeoinf-1.0.9.dist-info/RECORD +0 -21
- {pygeoinf-1.0.9.dist-info → pygeoinf-1.1.1.dist-info}/LICENSE +0 -0
- {pygeoinf-1.0.9.dist-info → pygeoinf-1.1.1.dist-info}/WHEEL +0 -0
pygeoinf/inversion.py
CHANGED
|
@@ -1,5 +1,14 @@
|
|
|
1
1
|
"""
|
|
2
|
-
|
|
2
|
+
Provides the abstract base class for all inversion algorithms.
|
|
3
|
+
|
|
4
|
+
This module defines the `Inversion` class, which serves as a common
|
|
5
|
+
foundation for various methods that solve an inverse problem. Its primary role
|
|
6
|
+
is to maintain a reference to the `ForwardProblem` being solved, providing a
|
|
7
|
+
consistent interface and convenient access to the problem's core components like
|
|
8
|
+
the model space and data space.
|
|
9
|
+
|
|
10
|
+
It also includes helper methods to assert preconditions required by different
|
|
11
|
+
inversion techniques, such as the existence of a data error measure.
|
|
3
12
|
"""
|
|
4
13
|
|
|
5
14
|
from __future__ import annotations
|
|
@@ -13,8 +22,10 @@ class Inversion:
|
|
|
13
22
|
An abstract base class for inversion methods.
|
|
14
23
|
|
|
15
24
|
This class provides a common structure for different inversion algorithms
|
|
16
|
-
(e.g., Bayesian, Least
|
|
17
|
-
forward problem being solved and provide convenient access to its
|
|
25
|
+
(e.g., Bayesian, Least Squares). Its main purpose is to hold a reference
|
|
26
|
+
to the forward problem being solved and provide convenient access to its
|
|
27
|
+
properties. Subclasses should inherit from this class to implement a
|
|
28
|
+
specific inversion technique.
|
|
18
29
|
"""
|
|
19
30
|
|
|
20
31
|
def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
|
|
@@ -60,7 +71,8 @@ class Inversion:
|
|
|
60
71
|
"""
|
|
61
72
|
Checks if the data error measure has an inverse covariance.
|
|
62
73
|
|
|
63
|
-
This is a precondition for methods that require the data precision
|
|
74
|
+
This is a precondition for methods that require the data precision
|
|
75
|
+
matrix (the inverse of the data error covariance).
|
|
64
76
|
|
|
65
77
|
Raises:
|
|
66
78
|
AttributeError: If no data error measure is set, or if the measure
|
pygeoinf/linear_bayesian.py
CHANGED
|
@@ -1,5 +1,23 @@
|
|
|
1
1
|
"""
|
|
2
|
-
|
|
2
|
+
Implements the Bayesian framework for solving linear inverse problems.
|
|
3
|
+
|
|
4
|
+
This module treats the inverse problem from a statistical perspective, aiming to
|
|
5
|
+
determine the full posterior probability distribution of the unknown model
|
|
6
|
+
parameters, rather than a single best-fit solution.
|
|
7
|
+
|
|
8
|
+
It assumes that the prior knowledge about the model and the statistics of the
|
|
9
|
+
data errors can be described by Gaussian measures. For a linear forward problem,
|
|
10
|
+
the resulting posterior distribution for the model is also Gaussian, allowing
|
|
11
|
+
for an analytical solution.
|
|
12
|
+
|
|
13
|
+
Key Classes
|
|
14
|
+
-----------
|
|
15
|
+
- `LinearBayesianInversion`: Computes the posterior Gaussian measure `p(u|d)`
|
|
16
|
+
for the model `u` given observed data `d`. This provides not only a mean
|
|
17
|
+
estimate for the model but also its uncertainty (covariance).
|
|
18
|
+
- `LinearBayesianInference`: Extends the framework to compute the posterior
|
|
19
|
+
distribution for a derived property of the model, `p(B(u)|d)`, where `B` is
|
|
20
|
+
some linear operator.
|
|
3
21
|
"""
|
|
4
22
|
|
|
5
23
|
from __future__ import annotations
|
|
@@ -12,22 +30,23 @@ from .gaussian_measure import GaussianMeasure
|
|
|
12
30
|
from .forward_problem import LinearForwardProblem
|
|
13
31
|
from .operators import LinearOperator
|
|
14
32
|
from .linear_solvers import LinearSolver, IterativeLinearSolver
|
|
15
|
-
from .hilbert_space import HilbertSpace,
|
|
33
|
+
from .hilbert_space import HilbertSpace, Vector
|
|
16
34
|
|
|
17
35
|
|
|
18
36
|
class LinearBayesianInversion(Inversion):
|
|
19
37
|
"""
|
|
20
38
|
Solves a linear inverse problem using Bayesian methods.
|
|
21
39
|
|
|
22
|
-
This class applies to problems of the form `d = A(u) + e`, where the
|
|
23
|
-
`u` and the error `e` are described
|
|
24
|
-
|
|
40
|
+
This class applies to problems of the form `d = A(u) + e`, where the prior
|
|
41
|
+
knowledge of the model `u` and the statistics of the error `e` are described
|
|
42
|
+
by Gaussian distributions. It computes the full posterior probability
|
|
43
|
+
distribution `p(u|d)` for the model parameters given an observation `d`.
|
|
25
44
|
"""
|
|
26
45
|
|
|
27
46
|
def __init__(
|
|
28
47
|
self,
|
|
29
|
-
forward_problem:
|
|
30
|
-
model_prior_measure:
|
|
48
|
+
forward_problem: LinearForwardProblem,
|
|
49
|
+
model_prior_measure: GaussianMeasure,
|
|
31
50
|
/,
|
|
32
51
|
) -> None:
|
|
33
52
|
"""
|
|
@@ -36,21 +55,22 @@ class LinearBayesianInversion(Inversion):
|
|
|
36
55
|
model_prior_measure: The prior Gaussian measure on the model space.
|
|
37
56
|
"""
|
|
38
57
|
super().__init__(forward_problem)
|
|
39
|
-
self._model_prior_measure:
|
|
58
|
+
self._model_prior_measure: GaussianMeasure = model_prior_measure
|
|
40
59
|
|
|
41
60
|
@property
|
|
42
|
-
def model_prior_measure(self) ->
|
|
61
|
+
def model_prior_measure(self) -> GaussianMeasure:
|
|
43
62
|
"""The prior Gaussian measure on the model space."""
|
|
44
63
|
return self._model_prior_measure
|
|
45
64
|
|
|
46
65
|
@property
|
|
47
|
-
def normal_operator(self) ->
|
|
66
|
+
def normal_operator(self) -> LinearOperator:
|
|
48
67
|
"""
|
|
49
|
-
Returns the covariance of the prior predictive distribution
|
|
68
|
+
Returns the covariance of the prior predictive distribution, `p(d)`.
|
|
50
69
|
|
|
51
|
-
This operator, `C_d = A
|
|
70
|
+
This operator, `C_d = A @ C_u @ A* + C_e`, represents the total
|
|
52
71
|
expected covariance in the data space before any data is observed.
|
|
53
|
-
|
|
72
|
+
Its inverse is central to calculating the posterior distribution and is
|
|
73
|
+
often referred to as the Bayesian normal operator.
|
|
54
74
|
"""
|
|
55
75
|
forward_operator = self.forward_problem.forward_operator
|
|
56
76
|
prior_model_covariance = self.model_prior_measure.covariance
|
|
@@ -63,7 +83,7 @@ class LinearBayesianInversion(Inversion):
|
|
|
63
83
|
else:
|
|
64
84
|
return forward_operator @ prior_model_covariance @ forward_operator.adjoint
|
|
65
85
|
|
|
66
|
-
def data_prior_measure(self) ->
|
|
86
|
+
def data_prior_measure(self) -> GaussianMeasure:
|
|
67
87
|
"""
|
|
68
88
|
Returns the prior predictive distribution on the data, `p(d)`.
|
|
69
89
|
|
|
@@ -87,17 +107,18 @@ class LinearBayesianInversion(Inversion):
|
|
|
87
107
|
|
|
88
108
|
def model_posterior_measure(
|
|
89
109
|
self,
|
|
90
|
-
data:
|
|
91
|
-
solver:
|
|
110
|
+
data: Vector,
|
|
111
|
+
solver: LinearSolver,
|
|
92
112
|
/,
|
|
93
113
|
*,
|
|
94
|
-
preconditioner: Optional[
|
|
95
|
-
) ->
|
|
114
|
+
preconditioner: Optional[LinearOperator] = None,
|
|
115
|
+
) -> GaussianMeasure:
|
|
96
116
|
"""
|
|
97
117
|
Returns the posterior Gaussian measure for the model, `p(u|d)`.
|
|
98
118
|
|
|
99
|
-
|
|
100
|
-
|
|
119
|
+
This measure represents our updated state of knowledge about the model
|
|
120
|
+
`u` after observing the data `d`. Its expectation is the most likely
|
|
121
|
+
model, and its covariance quantifies the remaining uncertainty.
|
|
101
122
|
|
|
102
123
|
Args:
|
|
103
124
|
data: The observed data vector.
|
|
@@ -148,19 +169,19 @@ class LinearBayesianInversion(Inversion):
|
|
|
148
169
|
|
|
149
170
|
class LinearBayesianInference(LinearBayesianInversion):
|
|
150
171
|
"""
|
|
172
|
+
Performs Bayesian inference on a derived property of the model.
|
|
151
173
|
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
operator acting on the model `u`.
|
|
174
|
+
While `LinearBayesianInversion` solves for the model `u` itself, this class
|
|
175
|
+
computes the posterior distribution for a property `p = B(u)`, where `B` is a
|
|
176
|
+
linear operator acting on the model `u`. This is useful for uncertainty
|
|
177
|
+
quantification of derived quantities (e.g., the average value of a field).
|
|
157
178
|
"""
|
|
158
179
|
|
|
159
180
|
def __init__(
|
|
160
181
|
self,
|
|
161
|
-
forward_problem:
|
|
162
|
-
model_prior_measure:
|
|
163
|
-
property_operator:
|
|
182
|
+
forward_problem: LinearForwardProblem,
|
|
183
|
+
model_prior_measure: GaussianMeasure,
|
|
184
|
+
property_operator: LinearOperator,
|
|
164
185
|
/,
|
|
165
186
|
) -> None:
|
|
166
187
|
"""
|
|
@@ -173,19 +194,19 @@ class LinearBayesianInference(LinearBayesianInversion):
|
|
|
173
194
|
super().__init__(forward_problem, model_prior_measure)
|
|
174
195
|
if property_operator.domain != self.forward_problem.model_space:
|
|
175
196
|
raise ValueError("Property operator domain must match the model space.")
|
|
176
|
-
self._property_operator:
|
|
197
|
+
self._property_operator: LinearOperator = property_operator
|
|
177
198
|
|
|
178
199
|
@property
|
|
179
|
-
def property_space(self) ->
|
|
200
|
+
def property_space(self) -> HilbertSpace:
|
|
180
201
|
"""The Hilbert space in which the property `p` resides."""
|
|
181
202
|
return self._property_operator.codomain
|
|
182
203
|
|
|
183
204
|
@property
|
|
184
|
-
def property_operator(self) ->
|
|
205
|
+
def property_operator(self) -> LinearOperator:
|
|
185
206
|
"""The linear operator `B` that defines the property."""
|
|
186
207
|
return self._property_operator
|
|
187
208
|
|
|
188
|
-
def property_prior_measure(self) ->
|
|
209
|
+
def property_prior_measure(self) -> GaussianMeasure:
|
|
189
210
|
"""
|
|
190
211
|
Returns the prior measure on the property space, `p(p)`.
|
|
191
212
|
|
|
@@ -196,12 +217,12 @@ class LinearBayesianInference(LinearBayesianInversion):
|
|
|
196
217
|
|
|
197
218
|
def property_posterior_measure(
|
|
198
219
|
self,
|
|
199
|
-
data:
|
|
200
|
-
solver:
|
|
220
|
+
data: Vector,
|
|
221
|
+
solver: LinearSolver,
|
|
201
222
|
/,
|
|
202
223
|
*,
|
|
203
|
-
preconditioner: Optional[
|
|
204
|
-
) ->
|
|
224
|
+
preconditioner: Optional[LinearOperator] = None,
|
|
225
|
+
) -> GaussianMeasure:
|
|
205
226
|
"""
|
|
206
227
|
Returns the posterior measure on the property space, `p(p|d)`.
|
|
207
228
|
|
pygeoinf/linear_forms.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Provides the `LinearForm` class to represent linear functionals.
|
|
3
|
+
|
|
4
|
+
A linear form is a linear mapping from a vector in a Hilbert space to a
|
|
5
|
+
scalar (a real number). This class provides a concrete representation for
|
|
6
|
+
elements of the dual space of a `HilbertSpace`.
|
|
7
|
+
|
|
8
|
+
A `LinearForm` can be thought of as a dual vector and is a fundamental component
|
|
9
|
+
for defining inner products and adjoint operators within the library.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
from typing import Callable, Optional, Any, TYPE_CHECKING
|
|
14
|
+
|
|
15
|
+
import numpy as np
|
|
16
|
+
|
|
17
|
+
# This block only runs for type checkers, not at runtime
|
|
18
|
+
if TYPE_CHECKING:
|
|
19
|
+
from .hilbert_space import HilbertSpace, EuclideanSpace
|
|
20
|
+
from .operators import LinearOperator
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class LinearForm:
    """
    Represents a linear form, a functional that maps vectors to scalars.

    A `LinearForm` is an element of a dual `HilbertSpace`. It is defined by its
    action on vectors from its `domain` space. Internally, this action is
    represented by a component vector which, when dotted with the component
    vector of a primal space element, produces the scalar result.
    """

    def __init__(
        self,
        domain: HilbertSpace,
        /,
        *,
        mapping: Optional[Callable[[Any], float]] = None,
        components: Optional[np.ndarray] = None,
    ) -> None:
        """
        Initializes the LinearForm.

        A form can be defined either by its functional mapping or directly
        by its component vector. If a mapping is provided without components,
        the components will be computed by evaluating the mapping on the
        basis vectors of the domain.

        Args:
            domain: The Hilbert space on which the form is defined.
            mapping: A function `f(x)` defining the action of the form.
            components: The component representation of the form.

        Raises:
            AssertionError: If neither `mapping` nor `components` is given.
        """
        self._domain: HilbertSpace = domain

        if components is None:
            if mapping is None:
                raise AssertionError("Neither mapping nor components specified.")
            # Recover the components by applying the mapping to each basis
            # vector of the domain in turn.
            self._compute_components(mapping)
        else:
            # NOTE: the provided array is stored without copying, so the
            # caller shares the underlying buffer with this form.
            self._components: np.ndarray = components

    @staticmethod
    def from_linear_operator(operator: "LinearOperator") -> LinearForm:
        """
        Creates a LinearForm from an operator that maps to a 1D Euclidean space.

        Args:
            operator: A linear operator whose codomain is `EuclideanSpace(1)`.

        Returns:
            The equivalent `LinearForm` on the operator's domain.

        Raises:
            ValueError: If the operator's codomain is not `EuclideanSpace(1)`.
        """
        from .hilbert_space import EuclideanSpace

        # Explicit check rather than `assert`: assertions are stripped when
        # Python runs with optimisations enabled (-O), which would silently
        # skip this validation.
        if operator.codomain != EuclideanSpace(1):
            raise ValueError(
                "Operator must map into a one-dimensional Euclidean space."
            )
        return LinearForm(operator.domain, mapping=lambda x: operator(x)[0])

    @property
    def domain(self) -> HilbertSpace:
        """The Hilbert space on which the form is defined."""
        return self._domain

    @property
    def components(self) -> np.ndarray:
        """
        The component vector of the form.
        """
        return self._components

    @property
    def as_linear_operator(self) -> "LinearOperator":
        """
        Represents the linear form as a `LinearOperator`.

        The resulting operator maps from the form's original domain to a
        1-dimensional `EuclideanSpace`, where the single component of the output
        is the scalar result of the form's action.
        """
        from .hilbert_space import EuclideanSpace
        from .operators import LinearOperator

        return LinearOperator(
            self.domain,
            EuclideanSpace(1),
            lambda x: np.array([self(x)]),
            dual_mapping=lambda y: y * self,
        )

    def copy(self) -> LinearForm:
        """
        Creates a deep copy of the linear form.
        """
        return LinearForm(self.domain, components=self.components.copy())

    def __call__(self, x: Any) -> float:
        """Applies the linear form to a vector."""
        return np.dot(self._components, self.domain.to_components(x))

    def __neg__(self) -> LinearForm:
        """Returns the additive inverse of the form."""
        return LinearForm(self.domain, components=-self._components)

    def __mul__(self, a: float) -> LinearForm:
        """Returns the product of the form and a scalar."""
        return LinearForm(self.domain, components=a * self._components)

    def __rmul__(self, a: float) -> LinearForm:
        """Returns the product of the form and a scalar."""
        return self * a

    def __truediv__(self, a: float) -> LinearForm:
        """Returns the division of the form by a scalar."""
        return self * (1.0 / a)

    def __add__(self, other: LinearForm) -> LinearForm:
        """
        Returns the sum of this form and another.

        Raises:
            ValueError: If the two forms are defined on different domains.
        """
        # Mirror the domain check performed by `__iadd__` so that in-place
        # and out-of-place addition behave consistently.
        if self.domain != other.domain:
            raise ValueError("Linear forms must share the same domain for addition.")
        return LinearForm(self.domain, components=self.components + other.components)

    def __sub__(self, other: LinearForm) -> LinearForm:
        """
        Returns the difference between this form and another.

        Raises:
            ValueError: If the two forms are defined on different domains.
        """
        if self.domain != other.domain:
            raise ValueError(
                "Linear forms must share the same domain for subtraction."
            )
        return LinearForm(self.domain, components=self.components - other.components)

    def __imul__(self, a: float) -> "LinearForm":
        """
        Performs in-place scalar multiplication: self *= a.
        """
        self._components *= a
        return self

    def __iadd__(self, other: "LinearForm") -> "LinearForm":
        """
        Performs in-place addition with another form: self += other.
        """
        if self.domain != other.domain:
            raise ValueError("Linear forms must share the same domain for addition.")
        self._components += other.components
        return self

    def __str__(self) -> str:
        """Returns the string representation of the form's components."""
        return self.components.__str__()

    def _compute_components(self, mapping: Callable[[Any], float]) -> None:
        """
        Computes the component vector of the form.

        Evaluates the mapping on each basis vector of the domain; the i-th
        component is the form's action on the i-th basis vector.
        """
        self._components = np.zeros(self.domain.dim)
        cx = np.zeros(self.domain.dim)
        for i in range(self.domain.dim):
            cx[i] = 1
            x = self.domain.from_components(cx)
            self._components[i] = mapping(x)
            cx[i] = 0
|
pygeoinf/linear_optimisation.py
CHANGED
|
@@ -1,8 +1,21 @@
|
|
|
1
1
|
"""
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
This module provides
|
|
5
|
-
|
|
2
|
+
Implements optimisation-based methods for solving linear inverse problems.
|
|
3
|
+
|
|
4
|
+
This module provides classical, deterministic approaches to inversion that seek
|
|
5
|
+
a single "best-fit" model. These methods are typically formulated as finding
|
|
6
|
+
the model `u` that minimizes a cost functional.
|
|
7
|
+
|
|
8
|
+
The primary goal is to find a stable solution to an ill-posed problem by
|
|
9
|
+
incorporating regularization, which balances fitting the data with controlling
|
|
10
|
+
the complexity or norm of the solution.
|
|
11
|
+
|
|
12
|
+
Key Classes
|
|
13
|
+
-----------
|
|
14
|
+
- `LinearLeastSquaresInversion`: Solves the inverse problem by minimizing a
|
|
15
|
+
Tikhonov-regularized least-squares functional.
|
|
16
|
+
- `LinearMinimumNormInversion`: Finds the model with the smallest norm that
|
|
17
|
+
fits the data to a statistically acceptable degree using the discrepancy
|
|
18
|
+
principle.
|
|
6
19
|
"""
|
|
7
20
|
|
|
8
21
|
from __future__ import annotations
|
|
@@ -15,7 +28,7 @@ from .inversion import Inversion
|
|
|
15
28
|
from .forward_problem import LinearForwardProblem
|
|
16
29
|
from .operators import LinearOperator
|
|
17
30
|
from .linear_solvers import LinearSolver, IterativeLinearSolver
|
|
18
|
-
from .hilbert_space import
|
|
31
|
+
from .hilbert_space import Vector
|
|
19
32
|
|
|
20
33
|
|
|
21
34
|
class LinearLeastSquaresInversion(Inversion):
|
|
@@ -23,8 +36,9 @@ class LinearLeastSquaresInversion(Inversion):
|
|
|
23
36
|
Solves a linear inverse problem using Tikhonov-regularized least-squares.
|
|
24
37
|
|
|
25
38
|
This method finds the model `u` that minimizes the functional:
|
|
26
|
-
`J(u) = ||A(u) - d
|
|
27
|
-
where
|
|
39
|
+
`J(u) = ||A(u) - d||² + α² * ||u||²`
|
|
40
|
+
where `α` is the damping parameter. If a data error covariance is provided,
|
|
41
|
+
the data misfit norm is appropriately weighted by the inverse covariance.
|
|
28
42
|
"""
|
|
29
43
|
|
|
30
44
|
def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
|
|
@@ -41,19 +55,15 @@ class LinearLeastSquaresInversion(Inversion):
|
|
|
41
55
|
"""
|
|
42
56
|
Returns the Tikhonov-regularized normal operator.
|
|
43
57
|
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
`N = A^T * A + alpha * I`
|
|
58
|
+
This operator, often written as `(A* @ W @ A + α*I)`, forms the left-hand
|
|
59
|
+
side of the normal equations that must be solved to find the least-squares
|
|
60
|
+
solution. `W` is the inverse data covariance (or identity).
|
|
48
61
|
|
|
49
62
|
Args:
|
|
50
|
-
damping: The Tikhonov damping parameter,
|
|
63
|
+
damping: The Tikhonov damping parameter, `α`. Must be non-negative.
|
|
51
64
|
|
|
52
65
|
Returns:
|
|
53
66
|
The normal operator as a `LinearOperator`.
|
|
54
|
-
|
|
55
|
-
Raises:
|
|
56
|
-
ValueError: If the damping parameter is negative.
|
|
57
67
|
"""
|
|
58
68
|
if damping < 0:
|
|
59
69
|
raise ValueError("Damping parameter must be non-negative.")
|
|
@@ -111,7 +121,7 @@ class LinearLeastSquaresInversion(Inversion):
|
|
|
111
121
|
)
|
|
112
122
|
|
|
113
123
|
# This mapping is affine, not linear, if the error measure has a non-zero mean.
|
|
114
|
-
def mapping(data: "
|
|
124
|
+
def mapping(data: "Vector") -> "Vector":
|
|
115
125
|
shifted_data = self.forward_problem.data_space.subtract(
|
|
116
126
|
data, self.forward_problem.data_error_measure.expectation
|
|
117
127
|
)
|
|
@@ -129,11 +139,12 @@ class LinearLeastSquaresInversion(Inversion):
|
|
|
129
139
|
|
|
130
140
|
class LinearMinimumNormInversion(Inversion):
|
|
131
141
|
"""
|
|
132
|
-
Finds
|
|
142
|
+
Finds a regularized solution using the discrepancy principle.
|
|
133
143
|
|
|
134
|
-
This method
|
|
135
|
-
the
|
|
136
|
-
|
|
144
|
+
This method automatically selects a Tikhonov damping parameter `α` such that
|
|
145
|
+
the resulting solution `u_α` fits the data to a statistically acceptable
|
|
146
|
+
level. It finds the model with the smallest norm `||u||` that satisfies
|
|
147
|
+
the target misfit, as determined by a chi-squared test.
|
|
137
148
|
"""
|
|
138
149
|
|
|
139
150
|
def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
|
|
@@ -185,8 +196,8 @@ class LinearMinimumNormInversion(Inversion):
|
|
|
185
196
|
lsq_inversion = LinearLeastSquaresInversion(self.forward_problem)
|
|
186
197
|
|
|
187
198
|
def get_model_for_damping(
|
|
188
|
-
damping: float, data: "
|
|
189
|
-
) -> tuple["
|
|
199
|
+
damping: float, data: "Vector", model0: Optional["Vector"] = None
|
|
200
|
+
) -> tuple["Vector", float]:
|
|
190
201
|
"""Computes the LS model and its chi-squared for a given damping."""
|
|
191
202
|
op = lsq_inversion.least_squares_operator(
|
|
192
203
|
damping, solver, preconditioner=preconditioner
|
|
@@ -195,7 +206,7 @@ class LinearMinimumNormInversion(Inversion):
|
|
|
195
206
|
chi_squared = self.forward_problem.chi_squared(model, data)
|
|
196
207
|
return model, chi_squared
|
|
197
208
|
|
|
198
|
-
def mapping(data: "
|
|
209
|
+
def mapping(data: "Vector") -> "Vector":
|
|
199
210
|
"""The non-linear mapping from data to the minimum-norm model."""
|
|
200
211
|
model = self.model_space.zero
|
|
201
212
|
chi_squared = self.forward_problem.chi_squared(model, data)
|