pygeoinf: 1.3.4-py3-none-any.whl → 1.3.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygeoinf/__init__.py +15 -2
- pygeoinf/checks/__init__.py +0 -0
- pygeoinf/linear_bayesian.py +121 -121
- pygeoinf/linear_optimisation.py +218 -0
- pygeoinf/subspaces.py +403 -0
- pygeoinf/symmetric_space/sh_tools.py +107 -0
- pygeoinf/symmetric_space/sphere.py +139 -0
- {pygeoinf-1.3.4.dist-info → pygeoinf-1.3.6.dist-info}/METADATA +1 -1
- {pygeoinf-1.3.4.dist-info → pygeoinf-1.3.6.dist-info}/RECORD +11 -8
- {pygeoinf-1.3.4.dist-info → pygeoinf-1.3.6.dist-info}/WHEEL +0 -0
- {pygeoinf-1.3.4.dist-info → pygeoinf-1.3.6.dist-info}/licenses/LICENSE +0 -0
pygeoinf/__init__.py
CHANGED
@@ -76,9 +76,14 @@ from .forward_problem import ForwardProblem, LinearForwardProblem
 from .linear_optimisation import (
     LinearLeastSquaresInversion,
     LinearMinimumNormInversion,
+    ConstrainedLinearLeastSquaresInversion,
+    ConstrainedLinearMinimumNormInversion,
 )
 
-from .linear_bayesian import LinearBayesianInversion, LinearBayesianInference
+from .linear_bayesian import (
+    LinearBayesianInversion,
+    ConstrainedLinearBayesianInversion,
+)
 
 from .backus_gilbert import HyperEllipsoid
 
@@ -87,6 +92,8 @@ from .nonlinear_optimisation import (
 )
 
 
+from .subspaces import OrthogonalProjector, AffineSubspace, LinearSubspace
+
 __all__ = [
     # random_matrix
     "fixed_rank_random_range",
@@ -144,11 +151,17 @@ __all__ = [
     # linear_optimisation
     "LinearLeastSquaresInversion",
     "LinearMinimumNormInversion",
+    "ConstrainedLinearLeastSquaresInversion",
+    "ConstrainedLinearMinimumNormInversion",
     # linear_bayesian
     "LinearBayesianInversion",
-    "LinearBayesianInference",
+    "ConstrainedLinearBayesianInversion",
     # backus_gilbert
     "HyperEllipsoid",
     # nonlinear_optimisation
     "ScipyUnconstrainedOptimiser",
+    # Subspaces
+    "OrthogonalProjector",
+    "AffineSubspace",
+    "LinearSubspace",
 ]
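
For orientation, everything added to `__all__` above is importable from the package root in 1.3.6. A minimal sketch (assuming pygeoinf 1.3.6 is installed; the constructor shapes are taken from the hunks further down this diff):

```python
# Import check against the 1.3.6 public API listed above.
from pygeoinf import (
    AffineSubspace,
    ConstrainedLinearBayesianInversion,
    ConstrainedLinearLeastSquaresInversion,
    ConstrainedLinearMinimumNormInversion,
    LinearSubspace,
    OrthogonalProjector,
)

# Constructor shapes, as given by the diffs below (positional-only up to `/`):
#   ConstrainedLinearLeastSquaresInversion(forward_problem, constraint)
#   ConstrainedLinearMinimumNormInversion(forward_problem, constraint)
#   ConstrainedLinearBayesianInversion(forward_problem, prior, constraint,
#                                      geometric=False)
```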
pygeoinf/checks/__init__.py
File without changes
pygeoinf/linear_bayesian.py
CHANGED
@@ -5,19 +5,12 @@ This module treats the inverse problem from a statistical perspective, aiming to
 determine the full posterior probability distribution of the unknown model
 parameters, rather than a single best-fit solution.
 
-It assumes that the prior knowledge about the model and the statistics of the
-data errors can be described by Gaussian measures. For a linear forward problem,
-the resulting posterior distribution for the model is also Gaussian, allowing
-for an analytical solution.
-
 Key Classes
 -----------
 - `LinearBayesianInversion`: Computes the posterior Gaussian measure `p(u|d)`
-  for the model `u` given observed data `d`.
-
-- `LinearBayesianInference`: Computes the posterior
-  distribution for a derived property of the model, `p(B(u)|d)`, where `B` is
-  some linear operator.
+  for the model `u` given observed data `d`.
+- `ConstrainedLinearBayesianInversion`: Solves the inverse problem subject to
+  an affine constraint `u in A`.
 """
 
 from __future__ import annotations
@@ -25,22 +18,19 @@ from typing import Optional
 
 from .inversion import LinearInversion
 from .gaussian_measure import GaussianMeasure
-
-
 from .forward_problem import LinearForwardProblem
-from .linear_operators import LinearOperator
+from .linear_operators import LinearOperator, NormalSumOperator
 from .linear_solvers import LinearSolver, IterativeLinearSolver
-from .hilbert_space import HilbertSpace, Vector
+from .hilbert_space import Vector
+from .subspaces import AffineSubspace
 
 
 class LinearBayesianInversion(LinearInversion):
     """
     Solves a linear inverse problem using Bayesian methods.
 
-    This class applies to problems of the form `d = A(u) + e`, where the model
-    prior and the statistics of the data errors are described
-    by Gaussian distributions. It computes the full posterior probability
-    distribution `p(u|d)` for the model parameters given an observation `d`.
+    This class applies to problems of the form `d = A(u) + e`. It computes the
+    full posterior probability distribution `p(u|d)`.
     """
 
     def __init__(
@@ -65,45 +55,43 @@ class LinearBayesianInversion(LinearInversion):
     @property
     def normal_operator(self) -> LinearOperator:
         """
-        Returns the data covariance operator `C_d`.
-
-        This operator, `C_d = A @ C_u @ A* + C_e`, represents the total
-        expected covariance in the data space before any data is observed.
-        Its inverse is central to calculating the posterior distribution and is
-        often referred to as the Bayesian normal operator.
+        Returns the Bayesian Normal operator: N = A Q A* + R.
         """
         forward_operator = self.forward_problem.forward_operator
-        prior_model_covariance = self.model_prior_measure.covariance
+        model_prior_covariance = self.model_prior_measure.covariance
 
         if self.forward_problem.data_error_measure_set:
             return (
-                forward_operator @ prior_model_covariance @ forward_operator.adjoint
+                forward_operator @ model_prior_covariance @ forward_operator.adjoint
                 + self.forward_problem.data_error_measure.covariance
             )
         else:
-            return forward_operator @ prior_model_covariance @ forward_operator.adjoint
+            return NormalSumOperator(forward_operator, model_prior_covariance)
 
-    def data_prior_measure(self) -> GaussianMeasure:
+    def kalman_operator(
+        self,
+        solver: LinearSolver,
+        /,
+        *,
+        preconditioner: Optional[LinearOperator] = None,
+    ) -> LinearOperator:
         """
-        Returns the prior Gaussian measure on the data space.
-
-        This measure describes the expected distribution of the data before any
-        specific observation is made, combining the uncertainty from the prior
-        model and the data errors.
+        Returns the Kalman gain operator K = Q A* N^-1.
         """
-
-
-
-
-
-
-
+        forward_operator = self.forward_problem.forward_operator
+        model_prior_covariance = self.model_prior_measure.covariance
+        normal_operator = self.normal_operator
+
+        if isinstance(solver, IterativeLinearSolver):
+            inverse_normal_operator = solver(
+                normal_operator, preconditioner=preconditioner
             )
         else:
-
-
-
-
+            inverse_normal_operator = solver(normal_operator)
+
+        return (
+            model_prior_covariance @ forward_operator.adjoint @ inverse_normal_operator
+        )
 
     def model_posterior_measure(
         self,
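
In finite dimensions, with every operator stored as a dense matrix, the two methods above reduce to the textbook formulas N = A Q A* + R and K = Q A* N^-1. A self-contained numpy sketch (illustrative only, not pygeoinf code; A, Q, and R stand in for the forward operator, prior covariance, and error covariance):

```python
import numpy as np

rng = np.random.default_rng(0)

m, n = 5, 8                        # data and model dimensions
A = rng.standard_normal((m, n))    # forward operator
Q = np.eye(n)                      # model prior covariance
R = 0.1 * np.eye(m)                # data error covariance

# Bayesian normal operator: total data-space covariance N = A Q A^T + R.
N = A @ Q @ A.T + R

# Kalman gain K = Q A^T N^{-1}, computed by solving N X = A Q rather than
# forming N^{-1} explicitly (Q and N are symmetric, so X^T = Q A^T N^{-1}).
K = np.linalg.solve(N, A @ Q).T

print(K.shape)  # (8, 5): maps data-space vectors to model-space updates
```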
@@ -114,108 +102,120 @@ class LinearBayesianInversion(LinearInversion):
         preconditioner: Optional[LinearOperator] = None,
     ) -> GaussianMeasure:
         """
-        Returns the posterior Gaussian measure on the model space.
-
-        This measure represents our updated state of knowledge about the model
-        `u` after observing the data `d`. Its expectation is the most likely
-        model, and its covariance quantifies the remaining uncertainty.
+        Returns the posterior Gaussian measure p(u|d).
 
         Args:
             data: The observed data vector.
             solver: A linear solver for inverting the normal operator.
-            preconditioner: An optional preconditioner
-
-        Returns:
-            The posterior `GaussianMeasure` on the model space.
+            preconditioner: An optional preconditioner.
         """
         data_space = self.data_space
         model_space = self.model_space
         forward_operator = self.forward_problem.forward_operator
-        prior_model_covariance = self.model_prior_measure.covariance
-        normal_operator = self.normal_operator
+        model_prior_covariance = self.model_prior_measure.covariance
 
-        if isinstance(solver, IterativeLinearSolver):
-            inverse_normal_operator = solver(
-                normal_operator, preconditioner=preconditioner
-            )
-        else:
-            inverse_normal_operator = solver(normal_operator)
+        # 1. Compute Kalman Gain
+        kalman_gain = self.kalman_operator(solver, preconditioner=preconditioner)
 
-        #
+        # 2. Compute Posterior Mean
+        # Shift data: d - A(mu_u)
         shifted_data = data_space.subtract(
             data, forward_operator(self.model_prior_measure.expectation)
         )
+
+        # Shift for noise mean: d - A(mu_u) - mu_e
         if self.forward_problem.data_error_measure_set:
-
-
-
+            error_expectation = self.forward_problem.data_error_measure.expectation
+            shifted_data = data_space.subtract(shifted_data, error_expectation)
+        else:
+            error_expectation = data_space.zero
 
-        mean_update = (
-            prior_model_covariance @ forward_operator.adjoint @ inverse_normal_operator
-        )(shifted_data)
+        mean_update = kalman_gain(shifted_data)
         expectation = model_space.add(self.model_prior_measure.expectation, mean_update)
 
-        #
-
-
-            @ forward_operator
-
-
-
+        # 3. Compute Posterior Covariance (Implicitly)
+        # C_post = C_u - K A C_u
+        covariance = model_prior_covariance - (
+            kalman_gain @ forward_operator @ model_prior_covariance
+        )
+
+        # 4. Set up Posterior Sampling
+        # Logic: Can sample if prior is samplable AND (noise is absent OR samplable)
+        can_sample_prior = self.model_prior_measure.sample_set
+        can_sample_noise = (
+            not self.forward_problem.data_error_measure_set
+            or self.forward_problem.data_error_measure.sample_set
         )
 
-
+        if can_sample_prior and can_sample_noise:
+
+            def sample():
+                # a. Sample Prior
+                model_sample = self.model_prior_measure.sample()
 
+                # b. Calculate Residual
+                prediction = forward_operator(model_sample)
+                data_residual = data_space.subtract(data, prediction)
 
-class LinearBayesianInference(LinearBayesianInversion):
+                # c. Perturb Residual
+                if self.forward_problem.data_error_measure_set:
+                    noise_raw = self.forward_problem.data_error_measure.sample()
+                    epsilon = data_space.subtract(noise_raw, error_expectation)
+                    data_space.axpy(1.0, epsilon, data_residual)
+
+                # d. Update
+                correction = kalman_gain(data_residual)
+                return model_space.add(model_sample, correction)
+
+            return GaussianMeasure(
+                covariance=covariance, expectation=expectation, sample=sample
+            )
+        else:
+            return GaussianMeasure(covariance=covariance, expectation=expectation)
+
+
+class ConstrainedLinearBayesianInversion(LinearInversion):
     """
-    Computes the posterior distribution for a derived property of the model.
+    Solves a linear inverse problem subject to an affine subspace constraint.
 
-
-
-
-
+    This class enforces the constraint `u in A` using either:
+    1. Bayesian Conditioning (Default): p(u | d, u in A).
+       If A is defined geometrically (no explicit equation), an implicit
+       operator (I-P) is used, which requires a robust solver in the subspace.
+    2. Geometric Projection: Projects the unconstrained posterior onto A.
     """
 
     def __init__(
         self,
         forward_problem: LinearForwardProblem,
         model_prior_measure: GaussianMeasure,
-        property_operator: LinearOperator,
+        constraint: AffineSubspace,
         /,
+        *,
+        geometric: bool = False,
     ) -> None:
         """
         Args:
-            forward_problem: The forward problem
-            model_prior_measure: The prior Gaussian measure
-            property_operator: The linear operator `B` that defines the property.
-
+            forward_problem: The forward problem.
+            model_prior_measure: The unconstrained prior Gaussian measure.
+            constraint: The affine subspace A.
+            geometric: If True, uses orthogonal projection (Euclidean metric).
+                If False (default), uses Bayesian conditioning.
         """
-        super().__init__(forward_problem, model_prior_measure)
-
-
-        self._property_operator = property_operator
-
-    @property
-    def property_space(self) -> HilbertSpace:
-        """The Hilbert space in which the property `p` resides."""
-        return self._property_operator.codomain
-
-    @property
-    def property_operator(self) -> LinearOperator:
-        """The linear operator `B` that defines the property."""
-        return self._property_operator
+        super().__init__(forward_problem)
+        self._unconstrained_prior = model_prior_measure
+        self._constraint = constraint
+        self._geometric = geometric
 
-    def property_prior_measure(self) -> GaussianMeasure:
+    def conditioned_prior_measure(self) -> GaussianMeasure:
         """
-
-
-        This is computed by propagating the model prior through the property
-        operator.
+        Computes the prior measure conditioned on the constraint.
         """
-        return self.model_prior_measure.affine_mapping(operator=self.property_operator)
+        return self._constraint.condition_gaussian_measure(
+            self._unconstrained_prior, geometric=self._geometric
+        )
 
-    def property_posterior_measure(
+    def model_posterior_measure(
         self,
         data: Vector,
         solver: LinearSolver,
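
The `sample` closure above is the standard perturbation construction for Gaussian posteriors (sometimes called Matheron's rule): if `u_s` is a prior draw and `e_s` a noise draw, then `u_s + K(d - A(u_s) - e_s)` is distributed exactly according to the posterior. A dense numpy check of steps a-d (illustrative only, not pygeoinf code):

```python
import numpy as np

rng = np.random.default_rng(1)
m, n = 5, 8
A = rng.standard_normal((m, n))
Q = np.eye(n)                    # prior covariance (zero prior mean)
R = 0.1 * np.eye(m)              # noise covariance (zero noise mean)
d = rng.standard_normal(m)       # a fixed observed data vector

N = A @ Q @ A.T + R
K = np.linalg.solve(N, A @ Q).T  # Kalman gain K = Q A^T N^{-1}

# Steps a-d, vectorized over 20000 draws.
U = rng.multivariate_normal(np.zeros(n), Q, size=20000)  # a. prior samples
E = rng.multivariate_normal(np.zeros(m), R, size=20000)  # c. noise samples
residuals = d - U @ A.T - E                              # b./c. perturbed residuals
posterior_samples = U + residuals @ K.T                  # d. Kalman update

# The empirical covariance approaches C_post = Q - K A Q.
C_post = Q - K @ A @ Q
print(np.abs(np.cov(posterior_samples, rowvar=False) - C_post).max())
```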
@@ -224,22 +224,22 @@ class LinearBayesianInference(LinearBayesianInversion):
         preconditioner: Optional[LinearOperator] = None,
     ) -> GaussianMeasure:
         """
-        Returns the posterior measure for the property `p(B(u)|d)`.
-
-        This is computed by first finding the posterior measure for the model,
-        `p(u|d)`, and then propagating it through the property operator `B`.
+        Returns the posterior Gaussian measure p(u | d, u in A).
 
         Args:
-            data: The observed data vector.
-            solver: A linear solver for inverting the normal operator.
-            preconditioner: An optional preconditioner.
+            data: Observed data vector.
+            solver: Solver for the data update (inverts A C_cond A* + Ce).
+            preconditioner: Preconditioner for the data update.
 
-
-
+        Note: The solver for the constraint update is managed internally by
+        the AffineSubspace object passed at initialization.
         """
-        # First, find the posterior measure for the model.
-        model_posterior = super().model_posterior_measure(
+        # 1. Condition Prior
+        cond_prior = self.conditioned_prior_measure()
+
+        # 2. Solve Bayesian Inverse Problem with the new prior
+        bayes_inv = LinearBayesianInversion(self.forward_problem, cond_prior)
+
+        return bayes_inv.model_posterior_measure(
            data, solver, preconditioner=preconditioner
         )
-        # Then, map that distribution to the property space.
-        return model_posterior.affine_mapping(operator=self.property_operator)
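
Taken together, the constrained class is a thin composition: condition the prior on the subspace, then delegate to `LinearBayesianInversion`. A usage sketch matching the signatures above (`forward_problem`, `prior`, `constraint`, `solver`, and `data` are hypothetical stand-ins that must be built with the rest of the pygeoinf API):

```python
from pygeoinf import ConstrainedLinearBayesianInversion

# Stand-ins: a LinearForwardProblem, a GaussianMeasure, an AffineSubspace,
# a LinearSolver, and a data vector are assumed to exist already.
inversion = ConstrainedLinearBayesianInversion(
    forward_problem,
    prior,
    constraint,
    geometric=False,  # default: Bayesian conditioning rather than projection
)

# Internally this conditions the prior via
# constraint.condition_gaussian_measure(prior, geometric=False) and then
# runs an ordinary LinearBayesianInversion with the conditioned prior.
posterior = inversion.model_posterior_measure(data, solver)
mean, cov = posterior.expectation, posterior.covariance
```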
pygeoinf/linear_optimisation.py
CHANGED
@@ -16,6 +16,8 @@ Key Classes
 - `LinearMinimumNormInversion`: Finds the model with the smallest norm that
   fits the data to a statistically acceptable degree using the discrepancy
   principle.
+- `ConstrainedLinearLeastSquaresInversion`: Solves a linear inverse problem
+  subject to an affine subspace constraint.
 """
 
 from __future__ import annotations
@@ -30,6 +32,7 @@ from .forward_problem import LinearForwardProblem
 from .linear_operators import LinearOperator
 from .linear_solvers import LinearSolver, IterativeLinearSolver
 from .hilbert_space import Vector
+from .subspaces import AffineSubspace
 
 
 class LinearLeastSquaresInversion(LinearInversion):
@@ -160,6 +163,113 @@ class LinearLeastSquaresInversion(LinearInversion):
         return inverse_normal_operator @ forward_operator.adjoint
 
 
+class ConstrainedLinearLeastSquaresInversion(LinearInversion):
+    """
+    Solves a linear inverse problem subject to an affine subspace constraint.
+
+    Problem:
+        Minimize J(u) = || A(u) - d ||_D^2 + alpha * || u ||_M^2
+        Subject to u in A (Affine Subspace)
+
+    Method:
+        The problem is reduced to an unconstrained minimization in the subspace.
+        We decompose the model as u = u_base + w, where u_base is the element
+        of the affine subspace closest to the origin (orthogonal to the tangent space),
+        and w is a perturbation in the tangent space.
+
+        The cost function separates (due to orthogonality) into:
+        J(w) = || A(w) - (d - A(u_base)) ||^2 + alpha * || w ||^2 + (alpha * ||u_base||^2)
+
+        This is solved using the standard LinearLeastSquaresInversion on a
+        reduced forward problem.
+    """
+
+    def __init__(
+        self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
+    ) -> None:
+        """
+        Args:
+            forward_problem: The original unconstrained forward problem.
+            constraint: The affine subspace A where the solution must lie.
+        """
+        super().__init__(forward_problem)
+        self._constraint = constraint
+
+        # 1. Compute the Orthogonal Base Vector (u_base)
+        # u_base = (I - P) * translation
+        # This is the unique vector in the affine space that is orthogonal to the tangent space.
+        # It ensures ||u||^2 = ||u_base||^2 + ||w||^2, decoupling the regularization.
+        self._u_base = constraint.domain.subtract(
+            constraint.translation, constraint.projector(constraint.translation)
+        )
+
+        # 2. Construct Reduced Forward Problem
+        # Operator: A_tilde = A @ P
+        reduced_operator = forward_problem.forward_operator @ constraint.projector
+
+        # The error measure on the data remains valid for the reduced problem
+        # because the noise model is additive and independent of the model parameters.
+        self._reduced_forward_problem = LinearForwardProblem(
+            reduced_operator,
+            data_error_measure=(
+                forward_problem.data_error_measure
+                if forward_problem.data_error_measure_set
+                else None
+            ),
+        )
+
+        # 3. Initialize the internal unconstrained solver
+        self._unconstrained_inversion = LinearLeastSquaresInversion(
+            self._reduced_forward_problem
+        )
+
+    def least_squares_operator(
+        self,
+        damping: float,
+        solver: LinearSolver,
+        /,
+        **kwargs,
+    ) -> NonLinearOperator:
+        """
+        Returns an operator that maps data to the constrained least-squares solution.
+
+        Args:
+            damping: The Tikhonov damping parameter.
+            solver: The linear solver for the reduced normal equations.
+            **kwargs: Additional arguments passed to the solver (e.g., preconditioner).
+
+        Returns:
+            A NonLinearOperator mapping d -> u_constrained.
+        """
+
+        # Get the operator L_tilde such that w = L_tilde(d_tilde)
+        reduced_op = self._unconstrained_inversion.least_squares_operator(
+            damping, solver, **kwargs
+        )
+
+        # Precompute A(u_base) to shift the data efficiently
+        # This represents the data predicted by the "base" model.
+        data_offset = self.forward_problem.forward_operator(self._u_base)
+
+        domain = self.data_space
+        codomain = self.model_space
+
+        def mapping(d: Vector) -> Vector:
+            # 1. Shift Data: d_tilde = d - A(u_base)
+            d_tilde = domain.subtract(d, data_offset)
+
+            # 2. Solve for perturbation w in the tangent space
+            # w = (P A* A P + alpha I)^-1 P A* d_tilde
+            w = reduced_op(d_tilde)
+
+            # 3. Reconstruct full model: u = u_base + w
+            # Note: w is guaranteed to be in the tangent space (Range of P)
+            # because of the structure of the reduced normal equations.
+            return codomain.add(self._u_base, w)
+
+        return NonLinearOperator(domain, codomain, mapping)
+
+
 class LinearMinimumNormInversion(LinearInversion):
     """
     Finds a regularized solution using the discrepancy principle.
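
In dense linear algebra, the reduction implemented by `ConstrainedLinearLeastSquaresInversion` takes only a few lines: with `P` the orthogonal projector onto the tangent space and `t` the translation, `u_base = (I - P) t`, and the damped normal equations are solved for the tangent-space perturbation `w`. An illustrative numpy sketch (not pygeoinf code):

```python
import numpy as np

rng = np.random.default_rng(2)
m, n, k = 6, 10, 4

A = rng.standard_normal((m, n))        # forward operator
B = rng.standard_normal((n, k))
P = B @ np.linalg.solve(B.T @ B, B.T)  # orthogonal projector onto a k-dim tangent space
t = rng.standard_normal(n)             # translation defining the affine subspace
d = rng.standard_normal(m)
alpha = 0.5                            # Tikhonov damping

# Base point: the element of the affine subspace orthogonal to the tangent space.
u_base = t - P @ t

# Reduced problem: minimize ||(A P) w - (d - A u_base)||^2 + alpha ||w||^2.
A_red = A @ P
d_tilde = d - A @ u_base
w = np.linalg.solve(A_red.T @ A_red + alpha * np.eye(n), A_red.T @ d_tilde)

u = u_base + w
# w lies in the tangent space, so the constraint holds and the norm splits:
print(np.allclose(P @ w, w), np.isclose(u @ u, u_base @ u_base + w @ w))
```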
@@ -309,3 +419,111 @@ class LinearMinimumNormInversion(LinearInversion):
         normal_operator = forward_operator @ forward_operator.adjoint
         inverse_normal_operator = solver(normal_operator)
         return forward_operator.adjoint @ inverse_normal_operator
+
+
+class ConstrainedLinearMinimumNormInversion(LinearInversion):
+    """
+    Finds the minimum-norm solution subject to an affine subspace constraint
+    using the discrepancy principle.
+
+    Problem:
+        Minimize ||u||
+        Subject to u in A (Affine Subspace)
+        And chi_squared(u, d) <= critical_value
+
+    Method:
+        We decompose the model as u = u_base + w, where u_base is the element
+        of the affine subspace with the smallest norm (orthogonal to the tangent
+        space), and w is a perturbation in the tangent space.
+
+        Because u_base and w are orthogonal, ||u||^2 = ||u_base||^2 + ||w||^2.
+        Minimizing ||u|| is therefore equivalent to minimizing ||w||.
+
+        The problem reduces to finding the minimum norm w such that:
+        || A(w) - (d - A(u_base)) ||_D^2 <= critical_value
+
+        This is solved using the standard LinearMinimumNormInversion on a
+        reduced forward problem.
+    """
+
+    def __init__(
+        self,
+        forward_problem: LinearForwardProblem,
+        constraint: AffineSubspace,
+    ) -> None:
+        """
+        Args:
+            forward_problem: The original unconstrained forward problem.
+            constraint: The affine subspace A where the solution must lie.
+        """
+        super().__init__(forward_problem)
+        if self.forward_problem.data_error_measure_set:
+            self.assert_inverse_data_covariance()
+
+        self._constraint = constraint
+
+        # 1. Compute the Orthogonal Base Vector (u_base)
+        # u_base = (I - P) * translation
+        # This is the vector in the affine space closest to the origin.
+        self._u_base = constraint.domain.subtract(
+            constraint.translation, constraint.projector(constraint.translation)
+        )
+
+        # 2. Construct Reduced Forward Problem
+        # Operator: A_tilde = A @ P
+        reduced_operator = forward_problem.forward_operator @ constraint.projector
+
+        self._reduced_forward_problem = LinearForwardProblem(
+            reduced_operator,
+            data_error_measure=(
+                forward_problem.data_error_measure
+                if forward_problem.data_error_measure_set
+                else None
+            ),
+        )
+
+        # 3. Initialize the internal unconstrained solver
+        self._unconstrained_inversion = LinearMinimumNormInversion(
+            self._reduced_forward_problem
+        )
+
+    def minimum_norm_operator(
+        self,
+        solver: LinearSolver,
+        /,
+        **kwargs,
+    ) -> NonLinearOperator:
+        """
+        Returns an operator that maps data to the constrained minimum-norm solution.
+
+        Args:
+            solver: The linear solver for the reduced normal equations.
+            **kwargs: Arguments passed to LinearMinimumNormInversion (e.g.,
+                significance_level, rtol, maxiter).
+
+        Returns:
+            A NonLinearOperator mapping d -> u_constrained.
+        """
+
+        # Get the operator L_tilde such that w = L_tilde(d_tilde)
+        reduced_op = self._unconstrained_inversion.minimum_norm_operator(
+            solver, **kwargs
+        )
+
+        # Precompute A(u_base) to shift the data
+        data_offset = self.forward_problem.forward_operator(self._u_base)
+
+        domain = self.data_space
+        codomain = self.model_space
+
+        def mapping(d: Vector) -> Vector:
+            # 1. Shift Data: d_tilde = d - A(u_base)
+            d_tilde = domain.subtract(d, data_offset)
+
+            # 2. Solve for perturbation w in the tangent space
+            w = reduced_op(d_tilde)
+
+            # 3. Reconstruct full model: u = u_base + w
+            return codomain.add(self._u_base, w)
+
+        return NonLinearOperator(domain, codomain, mapping)
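
The minimum-norm variant rides on the same decomposition: because `||u||^2 = ||u_base||^2 + ||w||^2`, minimising `||u||` over the affine subspace is the same as minimising `||w||` over the tangent space. A numpy sketch of the exact-fit limit (the class itself instead selects the damping by the discrepancy principle; not pygeoinf code):

```python
import numpy as np

rng = np.random.default_rng(3)
m, n, k = 4, 10, 6

A = rng.standard_normal((m, n))        # forward operator
B = rng.standard_normal((n, k))
P = B @ np.linalg.solve(B.T @ B, B.T)  # orthogonal projector onto the tangent space
t = rng.standard_normal(n)             # translation defining the affine subspace
d = rng.standard_normal(m)

u_base = t - P @ t                     # smallest-norm element of the affine subspace
d_tilde = d - A @ u_base               # shifted data

# Exact-fit limit of the discrepancy principle: minimum-norm w solving
# (A P) w = d_tilde; the pseudo-inverse keeps w inside the tangent space.
w = np.linalg.pinv(A @ P) @ d_tilde
u = u_base + w

print(np.allclose(A @ u, d))                       # data fit
print(np.isclose(u @ u, u_base @ u_base + w @ w))  # orthogonal norm split
```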