pygeoinf 1.3.6.tar.gz → 1.3.8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/PKG-INFO +1 -1
  2. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/__init__.py +23 -0
  3. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/linear_optimisation.py +45 -226
  4. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/linear_solvers.py +430 -0
  5. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/plot.py +178 -116
  6. pygeoinf-1.3.8/pygeoinf/preconditioners.py +140 -0
  7. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/random_matrix.py +8 -5
  8. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/symmetric_space/sh_tools.py +1 -1
  9. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pyproject.toml +1 -1
  10. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/LICENSE +0 -0
  11. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/README.md +0 -0
  12. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/auxiliary.py +0 -0
  13. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/backus_gilbert.py +0 -0
  14. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/checks/__init__.py +0 -0
  15. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/checks/hilbert_space.py +0 -0
  16. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/checks/linear_operators.py +0 -0
  17. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/checks/nonlinear_operators.py +0 -0
  18. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/direct_sum.py +0 -0
  19. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/forward_problem.py +0 -0
  20. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/gaussian_measure.py +0 -0
  21. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/hilbert_space.py +0 -0
  22. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/inversion.py +0 -0
  23. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/linear_bayesian.py +0 -0
  24. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/linear_forms.py +0 -0
  25. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/linear_operators.py +0 -0
  26. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/nonlinear_forms.py +0 -0
  27. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/nonlinear_operators.py +0 -0
  28. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/nonlinear_optimisation.py +0 -0
  29. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/parallel.py +0 -0
  30. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/subspaces.py +0 -0
  31. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/symmetric_space/__init__.py +0 -0
  32. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/symmetric_space/circle.py +0 -0
  33. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/symmetric_space/sphere.py +0 -0
  34. {pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/symmetric_space/symmetric_space.py +0 -0
{pygeoinf-1.3.6 → pygeoinf-1.3.8}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pygeoinf
-Version: 1.3.6
+Version: 1.3.8
 Summary: A package for solving geophysical inference and inverse problems
 License: BSD-3-Clause
 License-File: LICENSE
{pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/__init__.py
@@ -69,6 +69,16 @@ from .linear_solvers import (
     BICGStabMatrixSolver,
     GMRESMatrixSolver,
     CGSolver,
+    MinResSolver,
+    BICGStabSolver,
+    FCGSolver,
+)
+
+from .preconditioners import (
+    JacobiPreconditioningMethod,
+    SpectralPreconditioningMethod,
+    IdentityPreconditioningMethod,
+    IterativePreconditioningMethod,
 )

 from .forward_problem import ForwardProblem, LinearForwardProblem
@@ -94,6 +104,8 @@ from .nonlinear_optimisation import (

 from .subspaces import OrthogonalProjector, AffineSubspace, LinearSubspace

+from .plot import plot_1d_distributions, plot_corner_distributions
+
 __all__ = [
     # random_matrix
     "fixed_rank_random_range",
@@ -145,6 +157,14 @@ __all__ = [
     "BICGStabMatrixSolver",
     "GMRESMatrixSolver",
     "CGSolver",
+    "MinResSolver",
+    "BICGStabSolver",
+    "FCGSolver",
+    # preconditioners
+    "IdentityPreconditioningMethod",
+    "JacobiPreconditioningMethod",
+    "SpectralPreconditioningMethod",
+    "IterativePreconditioningMethod",
     # forward_problem
     "ForwardProblem",
     "LinearForwardProblem",
@@ -164,4 +184,7 @@ __all__ = [
     "OrthogonalProjector",
     "AffineSubspace",
     "LinearSubspace",
+    # plot
+    "plot_1d_distributions",
+    "plot_corner_distributions",
 ]
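The `__init__.py` changes above only re-export names defined in `pygeoinf.linear_solvers`, the new `pygeoinf.preconditioners` module, and `pygeoinf.plot`. As a rough sketch of the widened public surface, the snippet below imports the added names; only the identifiers come from this diff, and the zero-argument constructors are an assumption, not documented behaviour.

# Sketch only: class and function names are taken from the diff above;
# constructor signatures are assumed, not confirmed by this diff.
import pygeoinf as inf

# Iterative solvers newly exported alongside the existing CGSolver.
solvers = [inf.CGSolver(), inf.MinResSolver(), inf.BICGStabSolver(), inf.FCGSolver()]

# Preconditioning methods from the new pygeoinf.preconditioners module.
preconditioners = [
    inf.IdentityPreconditioningMethod(),
    inf.JacobiPreconditioningMethod(),
]

# Plotting helpers now re-exported at package level.
plotters = (inf.plot_1d_distributions, inf.plot_corner_distributions)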
{pygeoinf-1.3.6 → pygeoinf-1.3.8}/pygeoinf/linear_optimisation.py
@@ -5,10 +5,6 @@ This module provides classical, deterministic approaches to inversion that seek
 a single "best-fit" model. These methods are typically formulated as finding
 the model `u` that minimizes a cost functional.

-The primary goal is to find a stable solution to an ill-posed problem by
-incorporating regularization, which balances fitting the data with controlling
-the complexity or norm of the solution.
-
 Key Classes
 -----------
 - `LinearLeastSquaresInversion`: Solves the inverse problem by minimizing a
@@ -23,11 +19,8 @@ Key Classes
 from __future__ import annotations
 from typing import Optional, Union

-
 from .nonlinear_operators import NonLinearOperator
 from .inversion import LinearInversion
-
-
 from .forward_problem import LinearForwardProblem
 from .linear_operators import LinearOperator
 from .linear_solvers import LinearSolver, IterativeLinearSolver
@@ -41,34 +34,15 @@ class LinearLeastSquaresInversion(LinearInversion):

     This method finds the model `u` that minimizes the functional:
     `J(u) = ||A(u) - d||² + α² * ||u||²`
-    where `α` is the damping parameter. If a data error covariance is provided,
-    the data misfit norm is appropriately weighted by the inverse covariance.
     """

     def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
-        """
-        Args:
-            forward_problem: The forward problem. If it includes a data error
-                measure, the measure's inverse covariance must be defined.
-        """
         super().__init__(forward_problem)
         if self.forward_problem.data_error_measure_set:
             self.assert_inverse_data_covariance()

     def normal_operator(self, damping: float) -> LinearOperator:
-        """
-        Returns the Tikhonov-regularized normal operator.
-
-        This operator, often written as `(A* @ W @ A + α*I)`, forms the left-hand
-        side of the normal equations that must be solved to find the least-squares
-        solution. `W` is the inverse data covariance (or identity).
-
-        Args:
-            damping: The Tikhonov damping parameter, `α`. Must be non-negative.
-
-        Returns:
-            The normal operator as a `LinearOperator`.
-        """
+        """Returns the Tikhonov-regularized normal operator (A*WA + αI)."""
         if damping < 0:
             raise ValueError("Damping parameter must be non-negative.")

@@ -87,23 +61,17 @@ class LinearLeastSquaresInversion(LinearInversion):
         return forward_operator.adjoint @ forward_operator + damping * identity

     def normal_rhs(self, data: Vector) -> Vector:
-        """
-        Returns the right hand side of the normal equations for given data.
-        """
-
+        """Returns the right hand side of the normal equations (A*W d)."""
         forward_operator = self.forward_problem.forward_operator

         if self.forward_problem.data_error_measure_set:
             inverse_data_covariance = (
                 self.forward_problem.data_error_measure.inverse_covariance
             )
-
             shifted_data = self.forward_problem.data_space.subtract(
                 data, self.forward_problem.data_error_measure.expectation
             )
-
             return (forward_operator.adjoint @ inverse_data_covariance)(shifted_data)
-
         else:
             return forward_operator.adjoint(data)

@@ -113,30 +81,36 @@ class LinearLeastSquaresInversion(LinearInversion):
         solver: "LinearSolver",
         /,
         *,
-        preconditioner: Optional[LinearOperator] = None,
+        preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
     ) -> Union[NonLinearOperator, LinearOperator]:
         """
         Returns an operator that maps data to the least-squares solution.

-        The returned operator `L` gives the solution `u = L(d)`. If the data has
-        errors with a non-zero mean, `L` is a general non-linear `Operator`.
-        Otherwise, it is a `LinearOperator`.
-
         Args:
-            damping: The Tikhonov damping parameter, `alpha`.
+            damping: The Tikhonov damping parameter, alpha.
             solver: The linear solver for inverting the normal operator.
-            preconditioner: An optional preconditioner for iterative solvers.
-
-        Returns:
-            An operator that maps from the data space to the model space.
+            preconditioner: Either a direct LinearOperator or a LinearSolver
+                method (factory) used to generate the preconditioner.
         """
-
         forward_operator = self.forward_problem.forward_operator
         normal_operator = self.normal_operator(damping)

+        # Resolve the preconditioner if a method (LinearSolver) is provided
+        resolved_preconditioner = None
+        if preconditioner is not None:
+            if isinstance(preconditioner, LinearOperator):
+                resolved_preconditioner = preconditioner
+            elif isinstance(preconditioner, LinearSolver):
+                # Call the preconditioning method on the normal operator
+                resolved_preconditioner = preconditioner(normal_operator)
+            else:
+                raise TypeError(
+                    "Preconditioner must be a LinearOperator or LinearSolver."
+                )
+
         if isinstance(solver, IterativeLinearSolver):
             inverse_normal_operator = solver(
-                normal_operator, preconditioner=preconditioner
+                normal_operator, preconditioner=resolved_preconditioner
             )
         else:
             inverse_normal_operator = solver(normal_operator)
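The behavioural change in this hunk is that `preconditioner` may now be either a ready-made `LinearOperator` or a `LinearSolver`-style preconditioning method, which is called on the Tikhonov normal operator to build the actual preconditioner. Below is a rough sketch of the two call patterns; it assumes a `LinearForwardProblem` and data vector already exist and that the preconditioning methods exported in `__init__.py` are `LinearSolver` subclasses, neither of which is confirmed by this diff.

# Illustrative only: `forward_problem` and `data` are assumed to exist already.
import pygeoinf as inf

inversion = inf.LinearLeastSquaresInversion(forward_problem)
solver = inf.CGSolver()

# Option 1 (available in both versions): pass an explicit LinearOperator.
# op = inversion.least_squares_operator(0.1, solver, preconditioner=some_operator)

# Option 2 (new in 1.3.8): pass a preconditioning method; internally it is called
# as preconditioner(normal_operator), where normal_operator = A*WA + alpha*I.
op = inversion.least_squares_operator(
    0.1, solver, preconditioner=inf.JacobiPreconditioningMethod()
)
model = op(data)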
@@ -146,7 +120,6 @@ class LinearLeastSquaresInversion(LinearInversion):
                 self.forward_problem.data_error_measure.inverse_covariance
             )

-            # This mapping is affine, not linear, if the error measure has a non-zero mean.
             def mapping(data: Vector) -> Vector:
                 shifted_data = self.forward_problem.data_space.subtract(
                     data, self.forward_problem.data_error_measure.expectation
@@ -158,57 +131,23 @@ class LinearLeastSquaresInversion(LinearInversion):
                 )(shifted_data)

             return NonLinearOperator(self.data_space, self.model_space, mapping)
-
         else:
             return inverse_normal_operator @ forward_operator.adjoint


 class ConstrainedLinearLeastSquaresInversion(LinearInversion):
-    """
-    Solves a linear inverse problem subject to an affine subspace constraint.
-
-    Problem:
-        Minimize J(u) = || A(u) - d ||_D^2 + alpha * || u ||_M^2
-        Subject to u in A (Affine Subspace)
-
-    Method:
-        The problem is reduced to an unconstrained minimization in the subspace.
-        We decompose the model as u = u_base + w, where u_base is the element
-        of the affine subspace closest to the origin (orthogonal to the tangent space),
-        and w is a perturbation in the tangent space.
-
-        The cost function separates (due to orthogonality) into:
-        J(w) = || A(w) - (d - A(u_base)) ||^2 + alpha * || w ||^2 + (alpha * ||u_base||^2)
-
-        This is solved using the standard LinearLeastSquaresInversion on a
-        reduced forward problem.
-    """
+    """Solves a linear inverse problem subject to an affine subspace constraint."""

     def __init__(
         self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
     ) -> None:
-        """
-        Args:
-            forward_problem: The original unconstrained forward problem.
-            constraint: The affine subspace A where the solution must lie.
-        """
         super().__init__(forward_problem)
         self._constraint = constraint
-
-        # 1. Compute the Orthogonal Base Vector (u_base)
-        # u_base = (I - P) * translation
-        # This is the unique vector in the affine space that is orthogonal to the tangent space.
-        # It ensures ||u||^2 = ||u_base||^2 + ||w||^2, decoupling the regularization.
         self._u_base = constraint.domain.subtract(
             constraint.translation, constraint.projector(constraint.translation)
         )

-        # 2. Construct Reduced Forward Problem
-        # Operator: A_tilde = A @ P
         reduced_operator = forward_problem.forward_operator @ constraint.projector
-
-        # The error measure on the data remains valid for the reduced problem
-        # because the noise model is additive and independent of the model parameters.
         self._reduced_forward_problem = LinearForwardProblem(
             reduced_operator,
             data_error_measure=(
@@ -218,7 +157,6 @@ class ConstrainedLinearLeastSquaresInversion(LinearInversion):
             ),
         )

-        # 3. Initialize the internal unconstrained solver
         self._unconstrained_inversion = LinearLeastSquaresInversion(
             self._reduced_forward_problem
         )
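The comments deleted in the hunks above carried the actual argument behind the reduction: with `P` the orthogonal projector onto the tangent space and `t` the translation of the affine subspace, `u_base = (I - P) t` is orthogonal to every perturbation `w = P v`, so `||u||^2 = ||u_base||^2 + ||w||^2` and the constraint decouples from the regularization term. A small self-contained numpy check of that identity, with plain arrays standing in for pygeoinf vectors (not part of the package):

import numpy as np

# Columns of `basis` span the tangent space (assumed orthonormal here).
basis = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]).T
P = basis @ basis.T                    # orthogonal projector onto the tangent space

t = np.array([1.0, 2.0, 3.0])          # translation vector of the affine subspace
u_base = t - P @ t                     # (I - P) t, orthogonal to the tangent space
w = P @ np.array([0.5, -1.0, 4.0])     # an arbitrary perturbation in the tangent space
u = u_base + w

# Orthogonality decouples the norms: ||u||^2 = ||u_base||^2 + ||w||^2.
assert np.isclose(u @ u, u_base @ u_base + w @ w)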
@@ -228,64 +166,31 @@ class ConstrainedLinearLeastSquaresInversion(LinearInversion):
         damping: float,
         solver: LinearSolver,
         /,
+        *,
+        preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
         **kwargs,
     ) -> NonLinearOperator:
-        """
-        Returns an operator that maps data to the constrained least-squares solution.
-
-        Args:
-            damping: The Tikhonov damping parameter.
-            solver: The linear solver for the reduced normal equations.
-            **kwargs: Additional arguments passed to the solver (e.g., preconditioner).
-
-        Returns:
-            A NonLinearOperator mapping d -> u_constrained.
-        """
-
-        # Get the operator L_tilde such that w = L_tilde(d_tilde)
+        """Maps data to the constrained least-squares solution."""
         reduced_op = self._unconstrained_inversion.least_squares_operator(
-            damping, solver, **kwargs
+            damping, solver, preconditioner=preconditioner, **kwargs
         )

-        # Precompute A(u_base) to shift the data efficiently
-        # This represents the data predicted by the "base" model.
         data_offset = self.forward_problem.forward_operator(self._u_base)
-
         domain = self.data_space
         codomain = self.model_space

         def mapping(d: Vector) -> Vector:
-            # 1. Shift Data: d_tilde = d - A(u_base)
             d_tilde = domain.subtract(d, data_offset)
-
-            # 2. Solve for perturbation w in the tangent space
-            # w = (P A* A P + alpha I)^-1 P A* d_tilde
             w = reduced_op(d_tilde)
-
-            # 3. Reconstruct full model: u = u_base + w
-            # Note: w is guaranteed to be in the tangent space (Range of P)
-            # because of the structure of the reduced normal equations.
             return codomain.add(self._u_base, w)

         return NonLinearOperator(domain, codomain, mapping)


 class LinearMinimumNormInversion(LinearInversion):
-    """
-    Finds a regularized solution using the discrepancy principle.
-
-    This method automatically selects a Tikhonov damping parameter `α` such that
-    the resulting solution `u_α` fits the data to a statistically acceptable
-    level. It finds the model with the smallest norm `||u||` that satisfies
-    the target misfit, as determined by a chi-squared test.
-    """
+    """Finds a regularized solution using the discrepancy principle."""

     def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
-        """
-        Args:
-            forward_problem: The forward problem. Its data error measure and
-                inverse covariance must be defined.
-        """
         super().__init__(forward_problem)
         if self.forward_problem.data_error_measure_set:
             self.assert_inverse_data_covariance()
@@ -295,7 +200,7 @@ class LinearMinimumNormInversion(LinearInversion):
         solver: "LinearSolver",
         /,
         *,
-        preconditioner: Optional[LinearOperator] = None,
+        preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
         significance_level: float = 0.95,
         minimum_damping: float = 0.0,
         maxiter: int = 100,
@@ -303,24 +208,7 @@
         atol: float = 0.0,
     ) -> Union[NonLinearOperator, LinearOperator]:
         """
-        Returns an operator that maps data to the minimum-norm solution.
-
-        The method uses a bracketing search to finds the damping parameter `alpha`
-        such that `chi_squared(u_alpha, d)` matches a critical value. The mapping
-        is non-linear if data errors are present.
-
-        Args:
-            solver: A solver for the linear systems.
-            preconditioner: An optional preconditioner for iterative solvers.
-            significance_level: The target significance level for the
-                chi-squared test (e.g., 0.95).
-            minimum_damping: A floor for the damping parameter search.
-            maxiter: Maximum iterations for the bracketing search.
-            rtol: Relative tolerance for the damping parameter.
-            atol: Absolute tolerance for the damping parameter.
-
-        Returns:
-            An operator that maps data to the minimum-norm model.
+        Maps data to the minimum-norm solution matching target chi-squared.
         """
         if self.forward_problem.data_error_measure_set:
             critical_value = self.forward_problem.critical_chi_squared(
@@ -331,18 +219,20 @@
             def get_model_for_damping(
                 damping: float, data: Vector, model0: Optional[Vector] = None
             ) -> tuple[Vector, float]:
-                """
-                Computes the LS model and its chi-squared for a given damping.
-
-                When an iterative solver is used, an initial guess can be provided.
-                """
-
                 normal_operator = lsq_inversion.normal_operator(damping)
                 normal_rhs = lsq_inversion.normal_rhs(data)

+                # Resolve preconditioner for the specific trial damping alpha
+                res_precond = None
+                if preconditioner is not None:
+                    if isinstance(preconditioner, LinearOperator):
+                        res_precond = preconditioner
+                    else:
+                        res_precond = preconditioner(normal_operator)
+
                 if isinstance(solver, IterativeLinearSolver):
                     model = solver.solve_linear_system(
-                        normal_operator, preconditioner, normal_rhs, model0
+                        normal_operator, res_precond, normal_rhs, model0
                     )
                 else:
                     inverse_normal_operator = solver(normal_operator)
@@ -352,17 +242,13 @@
                 return model, chi_squared

             def mapping(data: Vector) -> Vector:
-                """The non-linear mapping from data to the minimum-norm model."""
-
-                # Check to see if the zero model fits the data.
+                # Bracketing search logic
                 chi_squared = self.forward_problem.chi_squared_from_residual(data)
                 if chi_squared <= critical_value:
                     return self.model_space.zero

-                # Find upper and lower bounds for the optimal damping parameter
                 damping = 1.0
                 _, chi_squared = get_model_for_damping(damping, data)
-
                 damping_lower = damping if chi_squared <= critical_value else None
                 damping_upper = damping if chi_squared > critical_value else None

@@ -373,9 +259,7 @@
                     damping /= 2.0
                     _, chi_squared = get_model_for_damping(damping, data)
                     if damping < minimum_damping:
-                        raise RuntimeError(
-                            "Discrepancy principle has failed; critical value cannot be reached."
-                        )
+                        raise RuntimeError("Discrepancy principle failed.")
                     damping_lower = damping

                 it = 0
@@ -386,12 +270,6 @@
                     _, chi_squared = get_model_for_damping(damping, data)
                     damping_upper = damping

-                if damping_lower is None or damping_upper is None:
-                    raise RuntimeError(
-                        "Failed to bracket the optimal damping parameter."
-                    )
-
-                # Bracket search for the optimal damping
                 model0 = None
                 for _ in range(maxiter):
                     damping = 0.5 * (damping_lower + damping_upper)
@@ -406,15 +284,12 @@
                         damping_lower + damping_upper
                     ):
                         return model
-
                     model0 = model

                 raise RuntimeError("Bracketing search failed to converge.")

             return NonLinearOperator(self.data_space, self.model_space, mapping)
-
         else:
-            # For error-free data, compute the minimum-norm solution via A*(A*A)^-1
             forward_operator = self.forward_problem.forward_operator
             normal_operator = forward_operator @ forward_operator.adjoint
             inverse_normal_operator = solver(normal_operator)
@@ -422,57 +297,20 @@


 class ConstrainedLinearMinimumNormInversion(LinearInversion):
-    """
-    Finds the minimum-norm solution subject to an affine subspace constraint
-    using the discrepancy principle.
-
-    Problem:
-        Minimize ||u||
-        Subject to u in A (Affine Subspace)
-        And chi_squared(u, d) <= critical_value
-
-    Method:
-        We decompose the model as u = u_base + w, where u_base is the element
-        of the affine subspace with the smallest norm (orthogonal to the tangent
-        space), and w is a perturbation in the tangent space.
-
-        Because u_base and w are orthogonal, ||u||^2 = ||u_base||^2 + ||w||^2.
-        Minimizing ||u|| is therefore equivalent to minimizing ||w||.
-
-        The problem reduces to finding the minimum norm w such that:
-            || A(w) - (d - A(u_base)) ||_D^2 <= critical_value
-
-        This is solved using the standard LinearMinimumNormInversion on a
-        reduced forward problem.
-    """
+    """Finds min-norm solution subject to affine subspace constraint."""

     def __init__(
-        self,
-        forward_problem: LinearForwardProblem,
-        constraint: AffineSubspace,
+        self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
     ) -> None:
-        """
-        Args:
-            forward_problem: The original unconstrained forward problem.
-            constraint: The affine subspace A where the solution must lie.
-        """
         super().__init__(forward_problem)
         if self.forward_problem.data_error_measure_set:
             self.assert_inverse_data_covariance()
-
         self._constraint = constraint
-
-        # 1. Compute the Orthogonal Base Vector (u_base)
-        # u_base = (I - P) * translation
-        # This is the vector in the affine space closest to the origin.
         self._u_base = constraint.domain.subtract(
             constraint.translation, constraint.projector(constraint.translation)
         )

-        # 2. Construct Reduced Forward Problem
-        # Operator: A_tilde = A @ P
         reduced_operator = forward_problem.forward_operator @ constraint.projector
-
         self._reduced_forward_problem = LinearForwardProblem(
             reduced_operator,
             data_error_measure=(
@@ -481,8 +319,6 @@ class ConstrainedLinearMinimumNormInversion(LinearInversion):
                 else None
             ),
         )
-
-        # 3. Initialize the internal unconstrained solver
         self._unconstrained_inversion = LinearMinimumNormInversion(
             self._reduced_forward_problem
         )
@@ -491,39 +327,22 @@ class ConstrainedLinearMinimumNormInversion(LinearInversion):
         self,
         solver: LinearSolver,
         /,
+        *,
+        preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
         **kwargs,
     ) -> NonLinearOperator:
-        """
-        Returns an operator that maps data to the constrained minimum-norm solution.
-
-        Args:
-            solver: The linear solver for the reduced normal equations.
-            **kwargs: Arguments passed to LinearMinimumNormInversion (e.g.,
-                significance_level, rtol, maxiter).
-
-        Returns:
-            A NonLinearOperator mapping d -> u_constrained.
-        """
-
-        # Get the operator L_tilde such that w = L_tilde(d_tilde)
+        """Returns operator for constrained discrepancy principle inversion."""
         reduced_op = self._unconstrained_inversion.minimum_norm_operator(
-            solver, **kwargs
+            solver, preconditioner=preconditioner, **kwargs
        )

-        # Precompute A(u_base) to shift the data
         data_offset = self.forward_problem.forward_operator(self._u_base)
-
         domain = self.data_space
         codomain = self.model_space

         def mapping(d: Vector) -> Vector:
-            # 1. Shift Data: d_tilde = d - A(u_base)
             d_tilde = domain.subtract(d, data_offset)
-
-            # 2. Solve for perturbation w in the tangent space
             w = reduced_op(d_tilde)
-
-            # 3. Reconstruct full model: u = u_base + w
             return codomain.add(self._u_base, w)

         return NonLinearOperator(domain, codomain, mapping)
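Taken together, the 1.3.8 changes thread the same `preconditioner` keyword through the unconstrained and constrained minimum-norm inversions. A closing sketch of how the discrepancy-principle interface might be driven end to end; `forward_problem` and `data` are placeholders, the zero-argument constructors are assumptions, and only the keyword names visible in the signature above are taken from this diff:

# Hypothetical usage; not a documented example from the package.
import pygeoinf as inf

inversion = inf.LinearMinimumNormInversion(forward_problem)

operator = inversion.minimum_norm_operator(
    inf.CGSolver(),
    preconditioner=inf.IdentityPreconditioningMethod(),  # or a plain LinearOperator
    significance_level=0.95,  # target of the chi-squared test
    minimum_damping=0.0,      # floor for the damping-parameter search
    maxiter=100,              # bracketing-search iterations
)
model = operator(data)  # runs the bracketing search over the damping parameter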