pygeoinf 1.3.4__tar.gz → 1.3.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/PKG-INFO +1 -1
  2. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/__init__.py +15 -2
  3. pygeoinf-1.3.6/pygeoinf/linear_bayesian.py +245 -0
  4. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/linear_optimisation.py +218 -0
  5. pygeoinf-1.3.6/pygeoinf/subspaces.py +403 -0
  6. pygeoinf-1.3.6/pygeoinf/symmetric_space/__init__.py +0 -0
  7. pygeoinf-1.3.6/pygeoinf/symmetric_space/sh_tools.py +107 -0
  8. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/symmetric_space/sphere.py +139 -0
  9. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pyproject.toml +2 -1
  10. pygeoinf-1.3.4/pygeoinf/linear_bayesian.py +0 -245
  11. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/LICENSE +0 -0
  12. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/README.md +0 -0
  13. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/auxiliary.py +0 -0
  14. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/backus_gilbert.py +0 -0
  15. {pygeoinf-1.3.4/pygeoinf/symmetric_space → pygeoinf-1.3.6/pygeoinf/checks}/__init__.py +0 -0
  16. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/checks/hilbert_space.py +0 -0
  17. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/checks/linear_operators.py +0 -0
  18. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/checks/nonlinear_operators.py +0 -0
  19. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/direct_sum.py +0 -0
  20. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/forward_problem.py +0 -0
  21. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/gaussian_measure.py +0 -0
  22. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/hilbert_space.py +0 -0
  23. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/inversion.py +0 -0
  24. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/linear_forms.py +0 -0
  25. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/linear_operators.py +0 -0
  26. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/linear_solvers.py +0 -0
  27. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/nonlinear_forms.py +0 -0
  28. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/nonlinear_operators.py +0 -0
  29. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/nonlinear_optimisation.py +0 -0
  30. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/parallel.py +0 -0
  31. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/plot.py +0 -0
  32. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/random_matrix.py +0 -0
  33. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/symmetric_space/circle.py +0 -0
  34. {pygeoinf-1.3.4 → pygeoinf-1.3.6}/pygeoinf/symmetric_space/symmetric_space.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pygeoinf
3
- Version: 1.3.4
3
+ Version: 1.3.6
4
4
  Summary: A package for solving geophysical inference and inverse problems
5
5
  License: BSD-3-Clause
6
6
  License-File: LICENSE
@@ -76,9 +76,14 @@ from .forward_problem import ForwardProblem, LinearForwardProblem
76
76
  from .linear_optimisation import (
77
77
  LinearLeastSquaresInversion,
78
78
  LinearMinimumNormInversion,
79
+ ConstrainedLinearLeastSquaresInversion,
80
+ ConstrainedLinearMinimumNormInversion,
79
81
  )
80
82
 
81
- from .linear_bayesian import LinearBayesianInversion, LinearBayesianInference
83
+ from .linear_bayesian import (
84
+ LinearBayesianInversion,
85
+ ConstrainedLinearBayesianInversion,
86
+ )
82
87
 
83
88
  from .backus_gilbert import HyperEllipsoid
84
89
 
@@ -87,6 +92,8 @@ from .nonlinear_optimisation import (
87
92
  )
88
93
 
89
94
 
95
+ from .subspaces import OrthogonalProjector, AffineSubspace, LinearSubspace
96
+
90
97
  __all__ = [
91
98
  # random_matrix
92
99
  "fixed_rank_random_range",
@@ -144,11 +151,17 @@ __all__ = [
144
151
  # linear_optimisation
145
152
  "LinearLeastSquaresInversion",
146
153
  "LinearMinimumNormInversion",
154
+ "ConstrainedLinearLeastSquaresInversion",
155
+ "ConstrainedLinearMinimumNormInversion",
147
156
  # linear_bayesian
148
157
  "LinearBayesianInversion",
149
- "LinearBayesianInference",
158
+ "ConstrainedLinearBayesianInversion",
150
159
  # backus_gilbert
151
160
  "HyperEllipsoid",
152
161
  # nonlinear_optimisation
153
162
  "ScipyUnconstrainedOptimiser",
163
+ # Subspaces
164
+ "OrthogonalProjector",
165
+ "AffineSubspace",
166
+ "LinearSubspace",
154
167
  ]
@@ -0,0 +1,245 @@
1
+ """
2
+ Implements the Bayesian framework for solving linear inverse problems.
3
+
4
+ This module treats the inverse problem from a statistical perspective, aiming to
5
+ determine the full posterior probability distribution of the unknown model
6
+ parameters, rather than a single best-fit solution.
7
+
8
+ Key Classes
9
+ -----------
10
+ - `LinearBayesianInversion`: Computes the posterior Gaussian measure `p(u|d)`
11
+ for the model `u` given observed data `d`.
12
+ - `ConstrainedLinearBayesianInversion`: Solves the inverse problem subject to
13
+ an affine constraint `u in A`.
14
+ """
15
+
16
+ from __future__ import annotations
17
+ from typing import Optional
18
+
19
+ from .inversion import LinearInversion
20
+ from .gaussian_measure import GaussianMeasure
21
+ from .forward_problem import LinearForwardProblem
22
+ from .linear_operators import LinearOperator, NormalSumOperator
23
+ from .linear_solvers import LinearSolver, IterativeLinearSolver
24
+ from .hilbert_space import Vector
25
+ from .subspaces import AffineSubspace
26
+
27
+
28
class LinearBayesianInversion(LinearInversion):
    """
    Solves a linear inverse problem using Bayesian methods.

    This class applies to problems of the form `d = A(u) + e`. It computes the
    full posterior probability distribution `p(u|d)`.
    """

    def __init__(
        self,
        forward_problem: LinearForwardProblem,
        model_prior_measure: GaussianMeasure,
        /,
    ) -> None:
        """
        Args:
            forward_problem: The forward problem linking the model to the data.
            model_prior_measure: The prior Gaussian measure on the model space.
        """
        super().__init__(forward_problem)
        self._model_prior_measure: GaussianMeasure = model_prior_measure

    @property
    def model_prior_measure(self) -> GaussianMeasure:
        """The prior Gaussian measure on the model space."""
        return self._model_prior_measure

    @property
    def normal_operator(self) -> LinearOperator:
        """
        The Bayesian normal operator `N = A Q A* + R`.

        Here `A` is the forward operator, `Q` the prior covariance, and `R`
        the data-error covariance. When no error measure is set the
        specialised `NormalSumOperator` form is used instead.
        """
        forward_operator = self.forward_problem.forward_operator
        model_prior_covariance = self.model_prior_measure.covariance

        if self.forward_problem.data_error_measure_set:
            return (
                forward_operator @ model_prior_covariance @ forward_operator.adjoint
                + self.forward_problem.data_error_measure.covariance
            )
        else:
            return NormalSumOperator(forward_operator, model_prior_covariance)

    def kalman_operator(
        self,
        solver: LinearSolver,
        /,
        *,
        preconditioner: Optional[LinearOperator] = None,
    ) -> LinearOperator:
        """
        Returns the Kalman gain operator `K = Q A* N^-1`.

        Args:
            solver: Linear solver used to invert the normal operator.
            preconditioner: Optional preconditioner, forwarded only to
                iterative solvers (direct solvers do not accept one).
        """
        forward_operator = self.forward_problem.forward_operator
        model_prior_covariance = self.model_prior_measure.covariance
        normal_operator = self.normal_operator

        if isinstance(solver, IterativeLinearSolver):
            inverse_normal_operator = solver(
                normal_operator, preconditioner=preconditioner
            )
        else:
            inverse_normal_operator = solver(normal_operator)

        return (
            model_prior_covariance @ forward_operator.adjoint @ inverse_normal_operator
        )

    def model_posterior_measure(
        self,
        data: Vector,
        solver: LinearSolver,
        /,
        *,
        preconditioner: Optional[LinearOperator] = None,
    ) -> GaussianMeasure:
        """
        Returns the posterior Gaussian measure p(u|d).

        Args:
            data: The observed data vector.
            solver: A linear solver for inverting the normal operator.
            preconditioner: An optional preconditioner.
        """
        data_space = self.data_space
        model_space = self.model_space
        forward_operator = self.forward_problem.forward_operator
        model_prior_covariance = self.model_prior_measure.covariance

        # 1. Compute Kalman gain K = Q A* N^-1.
        kalman_gain = self.kalman_operator(solver, preconditioner=preconditioner)

        # 2. Posterior mean: mu_u + K (d - A(mu_u) - mu_e).
        shifted_data = data_space.subtract(
            data, forward_operator(self.model_prior_measure.expectation)
        )

        if self.forward_problem.data_error_measure_set:
            error_expectation = self.forward_problem.data_error_measure.expectation
            shifted_data = data_space.subtract(shifted_data, error_expectation)

        mean_update = kalman_gain(shifted_data)
        expectation = model_space.add(self.model_prior_measure.expectation, mean_update)

        # 3. Posterior covariance (implicitly): C_post = C_u - K A C_u.
        covariance = model_prior_covariance - (
            kalman_gain @ forward_operator @ model_prior_covariance
        )

        # 4. Posterior sampling via Matheron's rule:
        #    u_post = u_prior + K (d - A(u_prior) - e),  e ~ error measure.
        # Possible when the prior is samplable AND the noise is absent or
        # samplable.
        can_sample_prior = self.model_prior_measure.sample_set
        can_sample_noise = (
            not self.forward_problem.data_error_measure_set
            or self.forward_problem.data_error_measure.sample_set
        )

        if can_sample_prior and can_sample_noise:

            def sample():
                # a. Draw a prior model sample.
                model_sample = self.model_prior_measure.sample()

                # b. Residual of the observed data against its prediction.
                prediction = forward_operator(model_sample)
                data_residual = data_space.subtract(data, prediction)

                # c. Subtract a *full* (uncentred) noise draw. BUGFIX: the
                #    previous implementation added a centred draw
                #    (sample - mean), which biased the sample mean by
                #    K(mu_e) relative to the posterior expectation whenever
                #    the noise mean is non-zero. (Adding vs. subtracting the
                #    centred part is distributionally equivalent; the
                #    missing mean term was the defect.)
                if self.forward_problem.data_error_measure_set:
                    noise_sample = self.forward_problem.data_error_measure.sample()
                    data_space.axpy(-1.0, noise_sample, data_residual)

                # d. Kalman update of the prior sample.
                correction = kalman_gain(data_residual)
                return model_space.add(model_sample, correction)

            return GaussianMeasure(
                covariance=covariance, expectation=expectation, sample=sample
            )
        else:
            return GaussianMeasure(covariance=covariance, expectation=expectation)
175
+
176
+
177
class ConstrainedLinearBayesianInversion(LinearInversion):
    """
    Solves a linear inverse problem subject to an affine subspace constraint.

    The constraint `u in A` is enforced by one of two strategies:

    1. Bayesian conditioning (default): computes p(u | d, u in A). If A is
       defined geometrically (no explicit equation), an implicit operator
       (I-P) is used, which requires a robust solver in the subspace.
    2. Geometric projection: projects the unconstrained posterior onto A.
    """

    def __init__(
        self,
        forward_problem: LinearForwardProblem,
        model_prior_measure: GaussianMeasure,
        constraint: AffineSubspace,
        /,
        *,
        geometric: bool = False,
    ) -> None:
        """
        Args:
            forward_problem: The forward problem.
            model_prior_measure: The unconstrained prior Gaussian measure.
            constraint: The affine subspace A.
            geometric: If True, uses orthogonal projection (Euclidean metric).
                If False (default), uses Bayesian conditioning.
        """
        super().__init__(forward_problem)
        self._unconstrained_prior = model_prior_measure
        self._constraint = constraint
        self._geometric = geometric

    def conditioned_prior_measure(self) -> GaussianMeasure:
        """Returns the prior measure conditioned on the constraint."""
        prior = self._unconstrained_prior
        return self._constraint.condition_gaussian_measure(
            prior, geometric=self._geometric
        )

    def model_posterior_measure(
        self,
        data: Vector,
        solver: LinearSolver,
        /,
        *,
        preconditioner: Optional[LinearOperator] = None,
    ) -> GaussianMeasure:
        """
        Returns the posterior Gaussian measure p(u | d, u in A).

        Args:
            data: Observed data vector.
            solver: Solver for the data update (inverts A C_cond A* + Ce).
            preconditioner: Preconditioner for the data update.

        Note: The solver for the constraint update is managed internally by
        the AffineSubspace object passed at initialization.
        """
        # Condition the prior on the constraint, then run a standard
        # Bayesian inversion with the conditioned prior.
        constrained_inversion = LinearBayesianInversion(
            self.forward_problem, self.conditioned_prior_measure()
        )
        return constrained_inversion.model_posterior_measure(
            data, solver, preconditioner=preconditioner
        )
@@ -16,6 +16,8 @@ Key Classes
16
16
  - `LinearMinimumNormInversion`: Finds the model with the smallest norm that
17
17
  fits the data to a statistically acceptable degree using the discrepancy
18
18
  principle.
19
+ - `ConstrainedLinearLeastSquaresInversion`: Solves a linear inverse problem
20
+ subject to an affine subspace constraint.
19
21
  """
20
22
 
21
23
  from __future__ import annotations
@@ -30,6 +32,7 @@ from .forward_problem import LinearForwardProblem
30
32
  from .linear_operators import LinearOperator
31
33
  from .linear_solvers import LinearSolver, IterativeLinearSolver
32
34
  from .hilbert_space import Vector
35
+ from .subspaces import AffineSubspace
33
36
 
34
37
 
35
38
  class LinearLeastSquaresInversion(LinearInversion):
@@ -160,6 +163,113 @@ class LinearLeastSquaresInversion(LinearInversion):
160
163
  return inverse_normal_operator @ forward_operator.adjoint
161
164
 
162
165
 
166
class ConstrainedLinearLeastSquaresInversion(LinearInversion):
    """
    Solves a linear inverse problem subject to an affine subspace constraint.

    Problem:
        Minimize J(u) = || A(u) - d ||_D^2 + alpha * || u ||_M^2
        Subject to u in A (Affine Subspace)

    Method:
        Writing u = u_base + w, with u_base the element of the affine
        subspace orthogonal to its tangent space and w a perturbation within
        the tangent space, orthogonality separates the cost into

            J(w) = || A(w) - (d - A(u_base)) ||^2 + alpha * || w ||^2
                   + (alpha * ||u_base||^2),

        an unconstrained problem handled by the standard
        LinearLeastSquaresInversion on a reduced forward problem.
    """

    def __init__(
        self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
    ) -> None:
        """
        Args:
            forward_problem: The original unconstrained forward problem.
            constraint: The affine subspace A where the solution must lie.
        """
        super().__init__(forward_problem)
        self._constraint = constraint

        # Base point u_base = (I - P) translation: the unique element of the
        # affine space orthogonal to the tangent space. This guarantees
        # ||u||^2 = ||u_base||^2 + ||w||^2, decoupling the regularisation.
        translation = constraint.translation
        self._u_base = constraint.domain.subtract(
            translation, constraint.projector(translation)
        )

        # Reduced forward operator A_tilde = A @ P acting on the tangent
        # space. The additive noise model is independent of the model
        # parameters, so the original error measure carries over unchanged.
        error_measure = (
            forward_problem.data_error_measure
            if forward_problem.data_error_measure_set
            else None
        )
        self._reduced_forward_problem = LinearForwardProblem(
            forward_problem.forward_operator @ constraint.projector,
            data_error_measure=error_measure,
        )

        # Unconstrained least-squares solver for the reduced problem.
        self._unconstrained_inversion = LinearLeastSquaresInversion(
            self._reduced_forward_problem
        )

    def least_squares_operator(
        self,
        damping: float,
        solver: LinearSolver,
        /,
        **kwargs,
    ) -> NonLinearOperator:
        """
        Returns an operator that maps data to the constrained least-squares solution.

        Args:
            damping: The Tikhonov damping parameter.
            solver: The linear solver for the reduced normal equations.
            **kwargs: Additional arguments passed to the solver (e.g., preconditioner).

        Returns:
            A NonLinearOperator mapping d -> u_constrained.
        """
        # Operator L_tilde such that w = L_tilde(d_tilde).
        tangent_solution_operator = (
            self._unconstrained_inversion.least_squares_operator(
                damping, solver, **kwargs
            )
        )

        # Data predicted by the base model, precomputed to shift the
        # observations efficiently.
        base_prediction = self.forward_problem.forward_operator(self._u_base)

        data_space = self.data_space
        model_space = self.model_space

        def solve(observed: Vector) -> Vector:
            # Shift data (d_tilde = d - A(u_base)), solve for the
            # tangent-space perturbation w = (P A* A P + alpha I)^-1 P A* d_tilde,
            # then reassemble u = u_base + w. The structure of the reduced
            # normal equations keeps w inside the tangent space (Range of P).
            shifted = data_space.subtract(observed, base_prediction)
            perturbation = tangent_solution_operator(shifted)
            return model_space.add(self._u_base, perturbation)

        return NonLinearOperator(data_space, model_space, solve)
271
+
272
+
163
273
  class LinearMinimumNormInversion(LinearInversion):
164
274
  """
165
275
  Finds a regularized solution using the discrepancy principle.
@@ -309,3 +419,111 @@ class LinearMinimumNormInversion(LinearInversion):
309
419
  normal_operator = forward_operator @ forward_operator.adjoint
310
420
  inverse_normal_operator = solver(normal_operator)
311
421
  return forward_operator.adjoint @ inverse_normal_operator
422
+
423
+
424
class ConstrainedLinearMinimumNormInversion(LinearInversion):
    """
    Finds the minimum-norm solution subject to an affine subspace constraint
    using the discrepancy principle.

    Problem:
        Minimize ||u||
        Subject to u in A (Affine Subspace)
        And chi_squared(u, d) <= critical_value

    Method:
        With u = u_base + w, where u_base is the minimum-norm element of the
        affine subspace (orthogonal to the tangent space) and w lies in the
        tangent space, orthogonality gives ||u||^2 = ||u_base||^2 + ||w||^2.
        Minimising ||u|| is therefore equivalent to minimising ||w|| subject
        to

            || A(w) - (d - A(u_base)) ||_D^2 <= critical_value,

        which is the standard LinearMinimumNormInversion on a reduced
        forward problem.
    """

    def __init__(
        self,
        forward_problem: LinearForwardProblem,
        constraint: AffineSubspace,
    ) -> None:
        """
        Args:
            forward_problem: The original unconstrained forward problem.
            constraint: The affine subspace A where the solution must lie.
        """
        super().__init__(forward_problem)
        if self.forward_problem.data_error_measure_set:
            self.assert_inverse_data_covariance()

        self._constraint = constraint

        # u_base = (I - P) translation: the vector in the affine space
        # closest to the origin.
        translation = constraint.translation
        self._u_base = constraint.domain.subtract(
            translation, constraint.projector(translation)
        )

        # Reduced forward problem with operator A_tilde = A @ P; the data
        # error measure is unaffected by the reduction.
        error_measure = (
            forward_problem.data_error_measure
            if forward_problem.data_error_measure_set
            else None
        )
        self._reduced_forward_problem = LinearForwardProblem(
            forward_problem.forward_operator @ constraint.projector,
            data_error_measure=error_measure,
        )

        # Unconstrained minimum-norm solver on the reduced problem.
        self._unconstrained_inversion = LinearMinimumNormInversion(
            self._reduced_forward_problem
        )

    def minimum_norm_operator(
        self,
        solver: LinearSolver,
        /,
        **kwargs,
    ) -> NonLinearOperator:
        """
        Returns an operator that maps data to the constrained minimum-norm solution.

        Args:
            solver: The linear solver for the reduced normal equations.
            **kwargs: Arguments passed to LinearMinimumNormInversion (e.g.,
                significance_level, rtol, maxiter).

        Returns:
            A NonLinearOperator mapping d -> u_constrained.
        """
        # Operator L_tilde such that w = L_tilde(d_tilde).
        tangent_solution_operator = (
            self._unconstrained_inversion.minimum_norm_operator(solver, **kwargs)
        )

        # Prediction of the base model, precomputed to shift the data.
        base_prediction = self.forward_problem.forward_operator(self._u_base)

        data_space = self.data_space
        model_space = self.model_space

        def solve(observed: Vector) -> Vector:
            # Shift data, solve for the tangent-space perturbation, and
            # reconstruct the full model u = u_base + w.
            shifted = data_space.subtract(observed, base_prediction)
            perturbation = tangent_solution_operator(shifted)
            return model_space.add(self._u_base, perturbation)

        return NonLinearOperator(data_space, model_space, solve)