pygeoinf 1.3.4.tar.gz → 1.3.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/PKG-INFO +1 -1
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/__init__.py +15 -2
- pygeoinf-1.3.5/pygeoinf/linear_bayesian.py +303 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/linear_optimisation.py +218 -0
- pygeoinf-1.3.5/pygeoinf/subspaces.py +311 -0
- pygeoinf-1.3.5/pygeoinf/symmetric_space/__init__.py +0 -0
- pygeoinf-1.3.5/pygeoinf/symmetric_space/sh_tools.py +95 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/symmetric_space/sphere.py +151 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pyproject.toml +2 -1
- pygeoinf-1.3.4/pygeoinf/linear_bayesian.py +0 -245
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/LICENSE +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/README.md +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/auxiliary.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/backus_gilbert.py +0 -0
- {pygeoinf-1.3.4/pygeoinf/symmetric_space → pygeoinf-1.3.5/pygeoinf/checks}/__init__.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/checks/hilbert_space.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/checks/linear_operators.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/checks/nonlinear_operators.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/direct_sum.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/forward_problem.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/gaussian_measure.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/hilbert_space.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/inversion.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/linear_forms.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/linear_operators.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/linear_solvers.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/nonlinear_forms.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/nonlinear_operators.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/nonlinear_optimisation.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/parallel.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/plot.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/random_matrix.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/symmetric_space/circle.py +0 -0
- {pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/symmetric_space/symmetric_space.py +0 -0
{pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/__init__.py

@@ -76,9 +76,14 @@ from .forward_problem import ForwardProblem, LinearForwardProblem
 from .linear_optimisation import (
     LinearLeastSquaresInversion,
     LinearMinimumNormInversion,
+    ConstrainedLinearLeastSquaresInversion,
+    ConstrainedLinearMinimumNormInversion,
 )
 
-from .linear_bayesian import
+from .linear_bayesian import (
+    LinearBayesianInversion,
+    ConstrainedLinearBayesianInversion,
+)
 
 from .backus_gilbert import HyperEllipsoid
 
@@ -87,6 +92,8 @@ from .nonlinear_optimisation import (
 )
 
 
+from .subspaces import OrthogonalProjector, AffineSubspace, LinearSubspace
+
 __all__ = [
     # random_matrix
     "fixed_rank_random_range",
@@ -144,11 +151,17 @@ __all__ = [
     # linear_optimisation
     "LinearLeastSquaresInversion",
     "LinearMinimumNormInversion",
+    "ConstrainedLinearLeastSquaresInversion",
+    "ConstrainedLinearMinimumNormInversion",
     # linear_bayesian
     "LinearBayesianInversion",
-    "
+    "ConstrainedLinearBayesianInversion",
     # backus_gilbert
     "HyperEllipsoid",
     # nonlinear_optimisation
     "ScipyUnconstrainedOptimiser",
+    # Subspaces
+    "OrthogonalProjector",
+    "AffineSubspace",
+    "LinearSubspace",
 ]
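For orientation, the new classes and subspace utilities are re-exported at the package root; a minimal sketch of the resulting imports (the names simply mirror the `__all__` additions above):

```python
# Names mirror the additions to __all__ in pygeoinf 1.3.5.
from pygeoinf import (
    LinearBayesianInversion,
    ConstrainedLinearBayesianInversion,
    ConstrainedLinearLeastSquaresInversion,
    ConstrainedLinearMinimumNormInversion,
    OrthogonalProjector,
    AffineSubspace,
    LinearSubspace,
)
```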
pygeoinf-1.3.5/pygeoinf/linear_bayesian.py (new file)

@@ -0,0 +1,303 @@
+"""
+Implements the Bayesian framework for solving linear inverse problems.
+
+This module treats the inverse problem from a statistical perspective, aiming to
+determine the full posterior probability distribution of the unknown model
+parameters, rather than a single best-fit solution.
+
+Key Classes
+-----------
+- `LinearBayesianInversion`: Computes the posterior Gaussian measure `p(u|d)`
+  for the model `u` given observed data `d`.
+- `LinearBayesianInference`: Extends the framework to compute the posterior
+  distribution for a derived property of the model.
+- `ConstrainedLinearBayesianInversion`: Solves the inverse problem subject to
+  a hard affine constraint `u in A`, interpreting it as conditioning the prior.
+"""
+
+from __future__ import annotations
+from typing import Optional
+
+from .inversion import LinearInversion
+from .gaussian_measure import GaussianMeasure
+from .forward_problem import LinearForwardProblem
+from .linear_operators import LinearOperator, NormalSumOperator
+from .linear_solvers import LinearSolver, IterativeLinearSolver
+from .hilbert_space import Vector
+from .subspaces import AffineSubspace
+
+
+class LinearBayesianInversion(LinearInversion):
+    """
+    Solves a linear inverse problem using Bayesian methods.
+
+    This class applies to problems of the form `d = A(u) + e`. It computes the
+    full posterior probability distribution `p(u|d)`.
+    """
+
+    def __init__(
+        self,
+        forward_problem: LinearForwardProblem,
+        model_prior_measure: GaussianMeasure,
+        /,
+    ) -> None:
+        super().__init__(forward_problem)
+        self._model_prior_measure: GaussianMeasure = model_prior_measure
+
+    @property
+    def model_prior_measure(self) -> GaussianMeasure:
+        """The prior Gaussian measure on the model space."""
+        return self._model_prior_measure
+
+    @property
+    def normal_operator(self) -> LinearOperator:
+        """
+        Returns the Bayesian normal operator:
+
+            N = A Q A* + R
+
+        with A the forward operator (with A* its adjoint), Q the model
+        prior covariance, and R the data error covariance. For error-free
+        problems this operator reduces to:
+
+            N = A Q A*
+
+        """
+        forward_operator = self.forward_problem.forward_operator
+        model_prior_covariance = self.model_prior_measure.covariance
+
+        if self.forward_problem.data_error_measure_set:
+            return (
+                forward_operator @ model_prior_covariance @ forward_operator.adjoint
+                + self.forward_problem.data_error_measure.covariance
+            )
+        else:
+            return NormalSumOperator(forward_operator, model_prior_covariance)
+
+    def kalman_operator(
+        self,
+        solver: LinearSolver,
+        /,
+        *,
+        preconditioner: Optional[LinearOperator] = None,
+    ):
+        """
+        Returns the Kalman gain operator for the problem:
+
+            K = Q A* Ni
+
+        where Q is the model prior covariance, A the forward operator
+        (with adjoint A*), and Ni is the inverse of the normal operator.
+
+        Args:
+            solver: A linear solver for inverting the normal operator.
+            preconditioner: An optional preconditioner for the solver.
+
+        Returns:
+            A LinearOperator for the Kalman gain.
+        """
+
+        forward_operator = self.forward_problem.forward_operator
+        model_prior_covariance = self.model_prior_measure.covariance
+        normal_operator = self.normal_operator
+
+        if isinstance(solver, IterativeLinearSolver):
+            inverse_normal_operator = solver(
+                normal_operator, preconditioner=preconditioner
+            )
+        else:
+            inverse_normal_operator = solver(normal_operator)
+
+        return (
+            model_prior_covariance @ forward_operator.adjoint @ inverse_normal_operator
+        )
+
+    def model_posterior_measure(
+        self,
+        data: Vector,
+        solver: LinearSolver,
+        /,
+        *,
+        preconditioner: Optional[LinearOperator] = None,
+    ) -> GaussianMeasure:
+        """
+        Returns the posterior Gaussian measure for the model conditioned on the data.
+
+        Args:
+            data: The observed data vector.
+            solver: A linear solver for inverting the normal operator.
+            preconditioner: An optional preconditioner for the normal operator.
+        """
+        data_space = self.data_space
+        model_space = self.model_space
+        forward_operator = self.forward_problem.forward_operator
+        model_prior_covariance = self.model_prior_measure.covariance
+
+        kalman_gain = self.kalman_operator(solver, preconditioner=preconditioner)
+
+        # u_bar_post = u_bar + K (v - A u_bar - v_bar)
+        shifted_data = data_space.subtract(
+            data, forward_operator(self.model_prior_measure.expectation)
+        )
+        if self.forward_problem.data_error_measure_set:
+            shifted_data = data_space.subtract(
+                shifted_data, self.forward_problem.data_error_measure.expectation
+            )
+        mean_update = kalman_gain(shifted_data)
+        expectation = model_space.add(self.model_prior_measure.expectation, mean_update)
+
+        # Q_post = Q - K A Q
+        covariance = model_prior_covariance - (
+            kalman_gain @ forward_operator @ model_prior_covariance
+        )
+
+        # Add a sampling method if that is possible.
+        can_sample_prior = self.model_prior_measure.sample_set
+        can_sample_noise = (
+            not self.forward_problem.data_error_measure_set
+            or self.forward_problem.data_error_measure.sample_set
+        )
+
+        if can_sample_prior and can_sample_noise:
+
+            if self.forward_problem.data_error_measure_set:
+                error_expectation = self.forward_problem.data_error_measure.expectation
+
+            def sample():
+                model_sample = self.model_prior_measure.sample()
+                prediction = forward_operator(model_sample)
+                data_residual = data_space.subtract(data, prediction)
+
+                if self.forward_problem.data_error_measure_set:
+                    noise_raw = self.forward_problem.data_error_measure.sample()
+                    epsilon = data_space.subtract(noise_raw, error_expectation)
+                    data_space.axpy(1.0, epsilon, data_residual)
+
+                correction = kalman_gain(data_residual)
+                return model_space.add(model_sample, correction)
+
+            return GaussianMeasure(
+                covariance=covariance, expectation=expectation, sample=sample
+            )
+        else:
+            return GaussianMeasure(covariance=covariance, expectation=expectation)
+
+
+class ConstrainedLinearBayesianInversion(LinearInversion):
+    """
+    Solves a linear inverse problem using Bayesian methods subject to an
+    affine subspace constraint `u in A`.
+
+    This interprets the constraint as conditioning the prior on the subspace.
+    The subspace must be defined by a linear equation B(u) = w.
+    """
+
+    def __init__(
+        self,
+        forward_problem: LinearForwardProblem,
+        model_prior_measure: GaussianMeasure,
+        constraint: AffineSubspace,
+        /,
+        *,
+        geometric: bool = False,
+    ) -> None:
+        """
+        Args:
+            forward_problem: The forward problem.
+            model_prior_measure: The unconstrained prior Gaussian measure.
+            constraint: The affine subspace A = {u | Bu = w}.
+            geometric: If True, uses orthogonal projection to enforce the constraint.
+                If False (default), uses Bayesian conditioning.
+        """
+        super().__init__(forward_problem)
+        self._unconstrained_prior = model_prior_measure
+        self._constraint = constraint
+        self._geometric = geometric
+
+        if not constraint.has_constraint_equation:
+            raise ValueError(
+                "For Bayesian inversion, the subspace must be defined by a linear "
+                "equation (constraint operator). Use AffineSubspace.from_linear_equation."
+            )
+
+    def conditioned_prior_measure(
+        self,
+        solver: LinearSolver,
+        preconditioner: Optional[LinearOperator] = None,
+    ) -> GaussianMeasure:
+        """
+        Computes the prior measure conditioned on the constraint B(u) = w.
+
+        Args:
+            solver: Linear solver used to invert the normal operator, BQB*.
+            preconditioner: Optional preconditioner for the constraint solver.
+        """
+
+        constraint_op = self._constraint.constraint_operator
+        constraint_val = self._constraint.constraint_value
+
+        if self._geometric:
+            # --- Geometric Approach (Affine Mapping) ---
+            # Map: u -> P u + v
+            # P = I - B* (B B*)^-1 B
+            # v = B* (B B*)^-1 w
+
+            gram_operator = constraint_op @ constraint_op.adjoint
+
+            if isinstance(solver, IterativeLinearSolver):
+                inv_gram_operator = solver(gram_operator, preconditioner=preconditioner)
+            else:
+                inv_gram_operator = solver(gram_operator)
+
+            pseudo_inverse = constraint_op.adjoint @ inv_gram_operator
+            identity = self._unconstrained_prior.domain.identity_operator()
+            projector = identity - pseudo_inverse @ constraint_op
+            translation = pseudo_inverse(constraint_val)
+
+            return self._unconstrained_prior.affine_mapping(
+                operator=projector, translation=translation
+            )
+
+        else:
+            # --- Bayesian Approach (Statistical Conditioning) ---
+            # Treat the constraint as a noiseless observation: w = B(u)
+
+            constraint_problem = LinearForwardProblem(constraint_op)
+            constraint_inversion = LinearBayesianInversion(
+                constraint_problem, self._unconstrained_prior
+            )
+
+            return constraint_inversion.model_posterior_measure(
+                constraint_val, solver, preconditioner=preconditioner
+            )
+
+    def model_posterior_measure(
+        self,
+        data: Vector,
+        solver: LinearSolver,
+        constraint_solver: LinearSolver,
+        *,
+        preconditioner: Optional[LinearOperator] = None,
+        constraint_preconditioner: Optional[LinearOperator] = None,
+    ) -> GaussianMeasure:
+        """
+        Returns the posterior Gaussian measure for the model given the constraint and the data.
+
+        Args:
+            data: Observed data vector.
+            solver: Solver for the data update (inverts A C_cond A* + Ce).
+            constraint_solver: Solver for the prior conditioning (inverts B C_prior B*).
+            preconditioner: Preconditioner for the data update (acts on the data space).
+            constraint_preconditioner: Preconditioner for the constraint update (acts on the property space).
+        """
+        # 1. Condition the prior (uses constraint_solver and constraint_preconditioner)
+        cond_prior = self.conditioned_prior_measure(
+            constraint_solver, preconditioner=constraint_preconditioner
+        )
+
+        # 2. Solve the Bayesian inverse problem (uses solver and preconditioner)
+        bayes_inv = LinearBayesianInversion(self.forward_problem, cond_prior)
+
+        return bayes_inv.model_posterior_measure(
+            data, solver, preconditioner=preconditioner
+        )
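The class above implements standard Gaussian conditioning. As a self-contained cross-check, here is a minimal NumPy sketch of the same algebra on dense matrices; the arrays, sizes, and variable names are invented for illustration and are not pygeoinf objects:

```python
import numpy as np

# Dense stand-ins: A = forward operator, Q = prior covariance,
# R = data-error covariance, u_bar = prior mean.
rng = np.random.default_rng(0)
A = rng.standard_normal((5, 8))
Q = np.eye(8)
R = 0.1 * np.eye(5)
u_bar = np.zeros(8)
d = A @ rng.standard_normal(8) + 0.1 * rng.standard_normal(5)

# normal_operator / kalman_operator:  N = A Q A* + R,  K = Q A* N^{-1}
N = A @ Q @ A.T + R
K = Q @ A.T @ np.linalg.inv(N)

# model_posterior_measure:  u_post = u_bar + K (d - A u_bar),  Q_post = Q - K A Q
u_post = u_bar + K @ (d - A @ u_bar)
Q_post = Q - K @ A @ Q

# ConstrainedLinearBayesianInversion first conditions the prior on B u = w;
# that is the same update with A -> B and R -> 0 (a noiseless observation).
B = rng.standard_normal((2, 8))
w = rng.standard_normal(2)
Kc = Q @ B.T @ np.linalg.inv(B @ Q @ B.T)
u_cond = u_bar + Kc @ (w - B @ u_bar)
Q_cond = Q - Kc @ B @ Q
assert np.allclose(B @ u_cond, w)  # the conditioned mean satisfies the constraint
```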
{pygeoinf-1.3.4 → pygeoinf-1.3.5}/pygeoinf/linear_optimisation.py

@@ -16,6 +16,8 @@ Key Classes
 - `LinearMinimumNormInversion`: Finds the model with the smallest norm that
   fits the data to a statistically acceptable degree using the discrepancy
   principle.
+- `ConstrainedLinearLeastSquaresInversion`: Solves a linear inverse problem
+  subject to an affine subspace constraint.
 """
 
 from __future__ import annotations
@@ -30,6 +32,7 @@ from .forward_problem import LinearForwardProblem
 from .linear_operators import LinearOperator
 from .linear_solvers import LinearSolver, IterativeLinearSolver
 from .hilbert_space import Vector
+from .subspaces import AffineSubspace
 
 
 class LinearLeastSquaresInversion(LinearInversion):
@@ -160,6 +163,113 @@ class LinearLeastSquaresInversion(LinearInversion):
         return inverse_normal_operator @ forward_operator.adjoint
 
 
+class ConstrainedLinearLeastSquaresInversion(LinearInversion):
+    """
+    Solves a linear inverse problem subject to an affine subspace constraint.
+
+    Problem:
+        Minimize J(u) = || A(u) - d ||_D^2 + alpha * || u ||_M^2
+        Subject to u in A (Affine Subspace)
+
+    Method:
+        The problem is reduced to an unconstrained minimization in the subspace.
+        We decompose the model as u = u_base + w, where u_base is the element
+        of the affine subspace closest to the origin (orthogonal to the tangent space),
+        and w is a perturbation in the tangent space.
+
+        The cost function separates (due to orthogonality) into:
+        J(w) = || A(w) - (d - A(u_base)) ||^2 + alpha * || w ||^2 + (alpha * ||u_base||^2)
+
+        This is solved using the standard LinearLeastSquaresInversion on a
+        reduced forward problem.
+    """
+
+    def __init__(
+        self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
+    ) -> None:
+        """
+        Args:
+            forward_problem: The original unconstrained forward problem.
+            constraint: The affine subspace A where the solution must lie.
+        """
+        super().__init__(forward_problem)
+        self._constraint = constraint
+
+        # 1. Compute the Orthogonal Base Vector (u_base)
+        # u_base = (I - P) * translation
+        # This is the unique vector in the affine space that is orthogonal to the tangent space.
+        # It ensures ||u||^2 = ||u_base||^2 + ||w||^2, decoupling the regularization.
+        self._u_base = constraint.domain.subtract(
+            constraint.translation, constraint.projector(constraint.translation)
+        )
+
+        # 2. Construct Reduced Forward Problem
+        # Operator: A_tilde = A @ P
+        reduced_operator = forward_problem.forward_operator @ constraint.projector
+
+        # The error measure on the data remains valid for the reduced problem
+        # because the noise model is additive and independent of the model parameters.
+        self._reduced_forward_problem = LinearForwardProblem(
+            reduced_operator,
+            data_error_measure=(
+                forward_problem.data_error_measure
+                if forward_problem.data_error_measure_set
+                else None
+            ),
+        )
+
+        # 3. Initialize the internal unconstrained solver
+        self._unconstrained_inversion = LinearLeastSquaresInversion(
+            self._reduced_forward_problem
+        )
+
+    def least_squares_operator(
+        self,
+        damping: float,
+        solver: LinearSolver,
+        /,
+        **kwargs,
+    ) -> NonLinearOperator:
+        """
+        Returns an operator that maps data to the constrained least-squares solution.
+
+        Args:
+            damping: The Tikhonov damping parameter.
+            solver: The linear solver for the reduced normal equations.
+            **kwargs: Additional arguments passed to the solver (e.g., preconditioner).
+
+        Returns:
+            A NonLinearOperator mapping d -> u_constrained.
+        """
+
+        # Get the operator L_tilde such that w = L_tilde(d_tilde)
+        reduced_op = self._unconstrained_inversion.least_squares_operator(
+            damping, solver, **kwargs
+        )
+
+        # Precompute A(u_base) to shift the data efficiently
+        # This represents the data predicted by the "base" model.
+        data_offset = self.forward_problem.forward_operator(self._u_base)
+
+        domain = self.data_space
+        codomain = self.model_space
+
+        def mapping(d: Vector) -> Vector:
+            # 1. Shift Data: d_tilde = d - A(u_base)
+            d_tilde = domain.subtract(d, data_offset)
+
+            # 2. Solve for perturbation w in the tangent space
+            # w = (P A* A P + alpha I)^-1 P A* d_tilde
+            w = reduced_op(d_tilde)
+
+            # 3. Reconstruct full model: u = u_base + w
+            # Note: w is guaranteed to be in the tangent space (Range of P)
+            # because of the structure of the reduced normal equations.
+            return codomain.add(self._u_base, w)
+
+        return NonLinearOperator(domain, codomain, mapping)
+
+
 class LinearMinimumNormInversion(LinearInversion):
     """
     Finds a regularized solution using the discrepancy principle.
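A minimal NumPy sketch of the reduction described in the `ConstrainedLinearLeastSquaresInversion` docstring, assuming Euclidean spaces and a constraint `B u = w`; all matrices are illustrative stand-ins, not pygeoinf operators:

```python
import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((6, 10))   # forward operator
B = rng.standard_normal((2, 10))   # constraint operator
w = rng.standard_normal(2)         # constraint value
d = rng.standard_normal(6)         # observed data
alpha = 0.5                        # Tikhonov damping

# Projector onto the tangent space ker(B) and the orthogonal base vector.
pinv = B.T @ np.linalg.inv(B @ B.T)   # B* (B B*)^{-1}
P = np.eye(10) - pinv @ B             # P = I - B* (B B*)^{-1} B
u_base = pinv @ w                     # smallest-norm element of {u | B u = w}

# Reduced problem: shift the data and solve the damped normal equations.
d_tilde = d - A @ u_base
A_red = A @ P
w_t = np.linalg.solve(A_red.T @ A_red + alpha * np.eye(10), A_red.T @ d_tilde)

# w_t already lies in ker(B): the right-hand side is in range(P), and any
# component orthogonal to it would have to satisfy alpha * q = 0. So u_base
# can be added back without re-projecting.
u = u_base + w_t
assert np.allclose(B @ u, w)  # the constraint holds
```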
@@ -309,3 +419,111 @@ class LinearMinimumNormInversion(LinearInversion):
         normal_operator = forward_operator @ forward_operator.adjoint
         inverse_normal_operator = solver(normal_operator)
         return forward_operator.adjoint @ inverse_normal_operator
+
+
+class ConstrainedLinearMinimumNormInversion(LinearInversion):
+    """
+    Finds the minimum-norm solution subject to an affine subspace constraint
+    using the discrepancy principle.
+
+    Problem:
+        Minimize ||u||
+        Subject to u in A (Affine Subspace)
+        And chi_squared(u, d) <= critical_value
+
+    Method:
+        We decompose the model as u = u_base + w, where u_base is the element
+        of the affine subspace with the smallest norm (orthogonal to the tangent
+        space), and w is a perturbation in the tangent space.
+
+        Because u_base and w are orthogonal, ||u||^2 = ||u_base||^2 + ||w||^2.
+        Minimizing ||u|| is therefore equivalent to minimizing ||w||.
+
+        The problem reduces to finding the minimum norm w such that:
+        || A(w) - (d - A(u_base)) ||_D^2 <= critical_value
+
+        This is solved using the standard LinearMinimumNormInversion on a
+        reduced forward problem.
+    """
+
+    def __init__(
+        self,
+        forward_problem: LinearForwardProblem,
+        constraint: AffineSubspace,
+    ) -> None:
+        """
+        Args:
+            forward_problem: The original unconstrained forward problem.
+            constraint: The affine subspace A where the solution must lie.
+        """
+        super().__init__(forward_problem)
+        if self.forward_problem.data_error_measure_set:
+            self.assert_inverse_data_covariance()
+
+        self._constraint = constraint
+
+        # 1. Compute the Orthogonal Base Vector (u_base)
+        # u_base = (I - P) * translation
+        # This is the vector in the affine space closest to the origin.
+        self._u_base = constraint.domain.subtract(
+            constraint.translation, constraint.projector(constraint.translation)
+        )
+
+        # 2. Construct Reduced Forward Problem
+        # Operator: A_tilde = A @ P
+        reduced_operator = forward_problem.forward_operator @ constraint.projector
+
+        self._reduced_forward_problem = LinearForwardProblem(
+            reduced_operator,
+            data_error_measure=(
+                forward_problem.data_error_measure
+                if forward_problem.data_error_measure_set
+                else None
+            ),
+        )
+
+        # 3. Initialize the internal unconstrained solver
+        self._unconstrained_inversion = LinearMinimumNormInversion(
+            self._reduced_forward_problem
+        )
+
+    def minimum_norm_operator(
+        self,
+        solver: LinearSolver,
+        /,
+        **kwargs,
+    ) -> NonLinearOperator:
+        """
+        Returns an operator that maps data to the constrained minimum-norm solution.
+
+        Args:
+            solver: The linear solver for the reduced normal equations.
+            **kwargs: Arguments passed to LinearMinimumNormInversion (e.g.,
+                significance_level, rtol, maxiter).
+
+        Returns:
+            A NonLinearOperator mapping d -> u_constrained.
+        """
+
+        # Get the operator L_tilde such that w = L_tilde(d_tilde)
+        reduced_op = self._unconstrained_inversion.minimum_norm_operator(
+            solver, **kwargs
+        )
+
+        # Precompute A(u_base) to shift the data
+        data_offset = self.forward_problem.forward_operator(self._u_base)
+
+        domain = self.data_space
+        codomain = self.model_space
+
+        def mapping(d: Vector) -> Vector:
+            # 1. Shift Data: d_tilde = d - A(u_base)
+            d_tilde = domain.subtract(d, data_offset)
+
+            # 2. Solve for perturbation w in the tangent space
+            w = reduced_op(d_tilde)
+
+            # 3. Reconstruct full model: u = u_base + w
+            return codomain.add(self._u_base, w)
+
+        return NonLinearOperator(domain, codomain, mapping)
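The orthogonal decomposition that the minimum-norm reduction relies on can be checked directly; a small NumPy sketch with invented matrices, verifying that `u_base` is orthogonal to the tangent space so that minimizing `||u||` over the subspace is the same as minimizing `||w||`:

```python
import numpy as np

rng = np.random.default_rng(2)
B = rng.standard_normal((3, 9))   # constraint operator
w = rng.standard_normal(3)        # constraint value

pinv = B.T @ np.linalg.inv(B @ B.T)
P = np.eye(9) - pinv @ B          # orthogonal projector onto ker(B)
u_base = pinv @ w                 # minimum-norm element of the affine subspace

w_t = P @ rng.standard_normal(9)  # arbitrary tangent-space perturbation
u = u_base + w_t                  # any other element of the subspace

assert np.allclose(B @ u, w)                           # still feasible
assert np.isclose(u @ u, u_base @ u_base + w_t @ w_t)  # Pythagoras
```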