pygeoinf 1.3.6__py3-none-any.whl → 1.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pygeoinf/__init__.py CHANGED
@@ -69,6 +69,16 @@ from .linear_solvers import (
69
69
  BICGStabMatrixSolver,
70
70
  GMRESMatrixSolver,
71
71
  CGSolver,
72
+ MinResSolver,
73
+ BICGStabSolver,
74
+ FCGSolver,
75
+ )
76
+
77
+ from .preconditioners import (
78
+ JacobiPreconditioningMethod,
79
+ SpectralPreconditioningMethod,
80
+ IdentityPreconditioningMethod,
81
+ IterativePreconditioningMethod,
72
82
  )
73
83
 
74
84
  from .forward_problem import ForwardProblem, LinearForwardProblem
@@ -145,6 +155,14 @@ __all__ = [
145
155
  "BICGStabMatrixSolver",
146
156
  "GMRESMatrixSolver",
147
157
  "CGSolver",
158
+ "MinResSolver",
159
+ "BICGStabSolver",
160
+ "FCGSolver",
161
+ # preconditioners
162
+ "IdentityPreconditioningMethod",
163
+ "JacobiPreconditioningMethod",
164
+ "SpectralPreconditioningMethod",
165
+ "IterativePreconditioningMethod",
148
166
  # forward_problem
149
167
  "ForwardProblem",
150
168
  "LinearForwardProblem",
@@ -5,10 +5,6 @@ This module provides classical, deterministic approaches to inversion that seek
5
5
  a single "best-fit" model. These methods are typically formulated as finding
6
6
  the model `u` that minimizes a cost functional.
7
7
 
8
- The primary goal is to find a stable solution to an ill-posed problem by
9
- incorporating regularization, which balances fitting the data with controlling
10
- the complexity or norm of the solution.
11
-
12
8
  Key Classes
13
9
  -----------
14
10
  - `LinearLeastSquaresInversion`: Solves the inverse problem by minimizing a
@@ -23,11 +19,8 @@ Key Classes
23
19
  from __future__ import annotations
24
20
  from typing import Optional, Union
25
21
 
26
-
27
22
  from .nonlinear_operators import NonLinearOperator
28
23
  from .inversion import LinearInversion
29
-
30
-
31
24
  from .forward_problem import LinearForwardProblem
32
25
  from .linear_operators import LinearOperator
33
26
  from .linear_solvers import LinearSolver, IterativeLinearSolver
@@ -41,34 +34,15 @@ class LinearLeastSquaresInversion(LinearInversion):
41
34
 
42
35
  This method finds the model `u` that minimizes the functional:
43
36
  `J(u) = ||A(u) - d||² + α² * ||u||²`
44
- where `α` is the damping parameter. If a data error covariance is provided,
45
- the data misfit norm is appropriately weighted by the inverse covariance.
46
37
  """
47
38
 
48
39
  def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
49
- """
50
- Args:
51
- forward_problem: The forward problem. If it includes a data error
52
- measure, the measure's inverse covariance must be defined.
53
- """
54
40
  super().__init__(forward_problem)
55
41
  if self.forward_problem.data_error_measure_set:
56
42
  self.assert_inverse_data_covariance()
57
43
 
58
44
  def normal_operator(self, damping: float) -> LinearOperator:
59
- """
60
- Returns the Tikhonov-regularized normal operator.
61
-
62
- This operator, often written as `(A* @ W @ A + α*I)`, forms the left-hand
63
- side of the normal equations that must be solved to find the least-squares
64
- solution. `W` is the inverse data covariance (or identity).
65
-
66
- Args:
67
- damping: The Tikhonov damping parameter, `α`. Must be non-negative.
68
-
69
- Returns:
70
- The normal operator as a `LinearOperator`.
71
- """
45
+ """Returns the Tikhonov-regularized normal operator (A*WA + αI)."""
72
46
  if damping < 0:
73
47
  raise ValueError("Damping parameter must be non-negative.")
74
48
 
@@ -87,23 +61,17 @@ class LinearLeastSquaresInversion(LinearInversion):
87
61
  return forward_operator.adjoint @ forward_operator + damping * identity
88
62
 
89
63
  def normal_rhs(self, data: Vector) -> Vector:
90
- """
91
- Returns the right hand side of the normal equations for given data.
92
- """
93
-
64
+ """Returns the right hand side of the normal equations (A*W d)."""
94
65
  forward_operator = self.forward_problem.forward_operator
95
66
 
96
67
  if self.forward_problem.data_error_measure_set:
97
68
  inverse_data_covariance = (
98
69
  self.forward_problem.data_error_measure.inverse_covariance
99
70
  )
100
-
101
71
  shifted_data = self.forward_problem.data_space.subtract(
102
72
  data, self.forward_problem.data_error_measure.expectation
103
73
  )
104
-
105
74
  return (forward_operator.adjoint @ inverse_data_covariance)(shifted_data)
106
-
107
75
  else:
108
76
  return forward_operator.adjoint(data)
109
77
 
@@ -113,30 +81,36 @@ class LinearLeastSquaresInversion(LinearInversion):
113
81
  solver: "LinearSolver",
114
82
  /,
115
83
  *,
116
- preconditioner: Optional[LinearOperator] = None,
84
+ preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
117
85
  ) -> Union[NonLinearOperator, LinearOperator]:
118
86
  """
119
87
  Returns an operator that maps data to the least-squares solution.
120
88
 
121
- The returned operator `L` gives the solution `u = L(d)`. If the data has
122
- errors with a non-zero mean, `L` is a general non-linear `Operator`.
123
- Otherwise, it is a `LinearOperator`.
124
-
125
89
  Args:
126
- damping: The Tikhonov damping parameter, `alpha`.
90
+ damping: The Tikhonov damping parameter, alpha.
127
91
  solver: The linear solver for inverting the normal operator.
128
- preconditioner: An optional preconditioner for iterative solvers.
129
-
130
- Returns:
131
- An operator that maps from the data space to the model space.
92
+ preconditioner: Either a direct LinearOperator or a LinearSolver
93
+ method (factory) used to generate the preconditioner.
132
94
  """
133
-
134
95
  forward_operator = self.forward_problem.forward_operator
135
96
  normal_operator = self.normal_operator(damping)
136
97
 
98
+ # Resolve the preconditioner if a method (LinearSolver) is provided
99
+ resolved_preconditioner = None
100
+ if preconditioner is not None:
101
+ if isinstance(preconditioner, LinearOperator):
102
+ resolved_preconditioner = preconditioner
103
+ elif isinstance(preconditioner, LinearSolver):
104
+ # Call the preconditioning method on the normal operator
105
+ resolved_preconditioner = preconditioner(normal_operator)
106
+ else:
107
+ raise TypeError(
108
+ "Preconditioner must be a LinearOperator or LinearSolver."
109
+ )
110
+
137
111
  if isinstance(solver, IterativeLinearSolver):
138
112
  inverse_normal_operator = solver(
139
- normal_operator, preconditioner=preconditioner
113
+ normal_operator, preconditioner=resolved_preconditioner
140
114
  )
141
115
  else:
142
116
  inverse_normal_operator = solver(normal_operator)
@@ -146,7 +120,6 @@ class LinearLeastSquaresInversion(LinearInversion):
146
120
  self.forward_problem.data_error_measure.inverse_covariance
147
121
  )
148
122
 
149
- # This mapping is affine, not linear, if the error measure has a non-zero mean.
150
123
  def mapping(data: Vector) -> Vector:
151
124
  shifted_data = self.forward_problem.data_space.subtract(
152
125
  data, self.forward_problem.data_error_measure.expectation
@@ -158,57 +131,23 @@ class LinearLeastSquaresInversion(LinearInversion):
158
131
  )(shifted_data)
159
132
 
160
133
  return NonLinearOperator(self.data_space, self.model_space, mapping)
161
-
162
134
  else:
163
135
  return inverse_normal_operator @ forward_operator.adjoint
164
136
 
165
137
 
166
138
  class ConstrainedLinearLeastSquaresInversion(LinearInversion):
167
- """
168
- Solves a linear inverse problem subject to an affine subspace constraint.
169
-
170
- Problem:
171
- Minimize J(u) = || A(u) - d ||_D^2 + alpha * || u ||_M^2
172
- Subject to u in A (Affine Subspace)
173
-
174
- Method:
175
- The problem is reduced to an unconstrained minimization in the subspace.
176
- We decompose the model as u = u_base + w, where u_base is the element
177
- of the affine subspace closest to the origin (orthogonal to the tangent space),
178
- and w is a perturbation in the tangent space.
179
-
180
- The cost function separates (due to orthogonality) into:
181
- J(w) = || A(w) - (d - A(u_base)) ||^2 + alpha * || w ||^2 + (alpha * ||u_base||^2)
182
-
183
- This is solved using the standard LinearLeastSquaresInversion on a
184
- reduced forward problem.
185
- """
139
+ """Solves a linear inverse problem subject to an affine subspace constraint."""
186
140
 
187
141
  def __init__(
188
142
  self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
189
143
  ) -> None:
190
- """
191
- Args:
192
- forward_problem: The original unconstrained forward problem.
193
- constraint: The affine subspace A where the solution must lie.
194
- """
195
144
  super().__init__(forward_problem)
196
145
  self._constraint = constraint
197
-
198
- # 1. Compute the Orthogonal Base Vector (u_base)
199
- # u_base = (I - P) * translation
200
- # This is the unique vector in the affine space that is orthogonal to the tangent space.
201
- # It ensures ||u||^2 = ||u_base||^2 + ||w||^2, decoupling the regularization.
202
146
  self._u_base = constraint.domain.subtract(
203
147
  constraint.translation, constraint.projector(constraint.translation)
204
148
  )
205
149
 
206
- # 2. Construct Reduced Forward Problem
207
- # Operator: A_tilde = A @ P
208
150
  reduced_operator = forward_problem.forward_operator @ constraint.projector
209
-
210
- # The error measure on the data remains valid for the reduced problem
211
- # because the noise model is additive and independent of the model parameters.
212
151
  self._reduced_forward_problem = LinearForwardProblem(
213
152
  reduced_operator,
214
153
  data_error_measure=(
@@ -218,7 +157,6 @@ class ConstrainedLinearLeastSquaresInversion(LinearInversion):
218
157
  ),
219
158
  )
220
159
 
221
- # 3. Initialize the internal unconstrained solver
222
160
  self._unconstrained_inversion = LinearLeastSquaresInversion(
223
161
  self._reduced_forward_problem
224
162
  )
@@ -228,64 +166,31 @@ class ConstrainedLinearLeastSquaresInversion(LinearInversion):
228
166
  damping: float,
229
167
  solver: LinearSolver,
230
168
  /,
169
+ *,
170
+ preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
231
171
  **kwargs,
232
172
  ) -> NonLinearOperator:
233
- """
234
- Returns an operator that maps data to the constrained least-squares solution.
235
-
236
- Args:
237
- damping: The Tikhonov damping parameter.
238
- solver: The linear solver for the reduced normal equations.
239
- **kwargs: Additional arguments passed to the solver (e.g., preconditioner).
240
-
241
- Returns:
242
- A NonLinearOperator mapping d -> u_constrained.
243
- """
244
-
245
- # Get the operator L_tilde such that w = L_tilde(d_tilde)
173
+ """Maps data to the constrained least-squares solution."""
246
174
  reduced_op = self._unconstrained_inversion.least_squares_operator(
247
- damping, solver, **kwargs
175
+ damping, solver, preconditioner=preconditioner, **kwargs
248
176
  )
249
177
 
250
- # Precompute A(u_base) to shift the data efficiently
251
- # This represents the data predicted by the "base" model.
252
178
  data_offset = self.forward_problem.forward_operator(self._u_base)
253
-
254
179
  domain = self.data_space
255
180
  codomain = self.model_space
256
181
 
257
182
  def mapping(d: Vector) -> Vector:
258
- # 1. Shift Data: d_tilde = d - A(u_base)
259
183
  d_tilde = domain.subtract(d, data_offset)
260
-
261
- # 2. Solve for perturbation w in the tangent space
262
- # w = (P A* A P + alpha I)^-1 P A* d_tilde
263
184
  w = reduced_op(d_tilde)
264
-
265
- # 3. Reconstruct full model: u = u_base + w
266
- # Note: w is guaranteed to be in the tangent space (Range of P)
267
- # because of the structure of the reduced normal equations.
268
185
  return codomain.add(self._u_base, w)
269
186
 
270
187
  return NonLinearOperator(domain, codomain, mapping)
271
188
 
272
189
 
273
190
  class LinearMinimumNormInversion(LinearInversion):
274
- """
275
- Finds a regularized solution using the discrepancy principle.
276
-
277
- This method automatically selects a Tikhonov damping parameter `α` such that
278
- the resulting solution `u_α` fits the data to a statistically acceptable
279
- level. It finds the model with the smallest norm `||u||` that satisfies
280
- the target misfit, as determined by a chi-squared test.
281
- """
191
+ """Finds a regularized solution using the discrepancy principle."""
282
192
 
283
193
  def __init__(self, forward_problem: "LinearForwardProblem", /) -> None:
284
- """
285
- Args:
286
- forward_problem: The forward problem. Its data error measure and
287
- inverse covariance must be defined.
288
- """
289
194
  super().__init__(forward_problem)
290
195
  if self.forward_problem.data_error_measure_set:
291
196
  self.assert_inverse_data_covariance()
@@ -295,7 +200,7 @@ class LinearMinimumNormInversion(LinearInversion):
295
200
  solver: "LinearSolver",
296
201
  /,
297
202
  *,
298
- preconditioner: Optional[LinearOperator] = None,
203
+ preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
299
204
  significance_level: float = 0.95,
300
205
  minimum_damping: float = 0.0,
301
206
  maxiter: int = 100,
@@ -303,24 +208,7 @@ class LinearMinimumNormInversion(LinearInversion):
303
208
  atol: float = 0.0,
304
209
  ) -> Union[NonLinearOperator, LinearOperator]:
305
210
  """
306
- Returns an operator that maps data to the minimum-norm solution.
307
-
308
- The method uses a bracketing search to finds the damping parameter `alpha`
309
- such that `chi_squared(u_alpha, d)` matches a critical value. The mapping
310
- is non-linear if data errors are present.
311
-
312
- Args:
313
- solver: A solver for the linear systems.
314
- preconditioner: An optional preconditioner for iterative solvers.
315
- significance_level: The target significance level for the
316
- chi-squared test (e.g., 0.95).
317
- minimum_damping: A floor for the damping parameter search.
318
- maxiter: Maximum iterations for the bracketing search.
319
- rtol: Relative tolerance for the damping parameter.
320
- atol: Absolute tolerance for the damping parameter.
321
-
322
- Returns:
323
- An operator that maps data to the minimum-norm model.
211
+ Maps data to the minimum-norm solution matching target chi-squared.
324
212
  """
325
213
  if self.forward_problem.data_error_measure_set:
326
214
  critical_value = self.forward_problem.critical_chi_squared(
@@ -331,18 +219,20 @@ class LinearMinimumNormInversion(LinearInversion):
331
219
  def get_model_for_damping(
332
220
  damping: float, data: Vector, model0: Optional[Vector] = None
333
221
  ) -> tuple[Vector, float]:
334
- """
335
- Computes the LS model and its chi-squared for a given damping.
336
-
337
- When an iterative solver is used, an initial guess can be provided.
338
- """
339
-
340
222
  normal_operator = lsq_inversion.normal_operator(damping)
341
223
  normal_rhs = lsq_inversion.normal_rhs(data)
342
224
 
225
+ # Resolve preconditioner for the specific trial damping alpha
226
+ res_precond = None
227
+ if preconditioner is not None:
228
+ if isinstance(preconditioner, LinearOperator):
229
+ res_precond = preconditioner
230
+ else:
231
+ res_precond = preconditioner(normal_operator)
232
+
343
233
  if isinstance(solver, IterativeLinearSolver):
344
234
  model = solver.solve_linear_system(
345
- normal_operator, preconditioner, normal_rhs, model0
235
+ normal_operator, res_precond, normal_rhs, model0
346
236
  )
347
237
  else:
348
238
  inverse_normal_operator = solver(normal_operator)
@@ -352,17 +242,13 @@ class LinearMinimumNormInversion(LinearInversion):
352
242
  return model, chi_squared
353
243
 
354
244
  def mapping(data: Vector) -> Vector:
355
- """The non-linear mapping from data to the minimum-norm model."""
356
-
357
- # Check to see if the zero model fits the data.
245
+ # Bracketing search logic
358
246
  chi_squared = self.forward_problem.chi_squared_from_residual(data)
359
247
  if chi_squared <= critical_value:
360
248
  return self.model_space.zero
361
249
 
362
- # Find upper and lower bounds for the optimal damping parameter
363
250
  damping = 1.0
364
251
  _, chi_squared = get_model_for_damping(damping, data)
365
-
366
252
  damping_lower = damping if chi_squared <= critical_value else None
367
253
  damping_upper = damping if chi_squared > critical_value else None
368
254
 
@@ -373,9 +259,7 @@ class LinearMinimumNormInversion(LinearInversion):
373
259
  damping /= 2.0
374
260
  _, chi_squared = get_model_for_damping(damping, data)
375
261
  if damping < minimum_damping:
376
- raise RuntimeError(
377
- "Discrepancy principle has failed; critical value cannot be reached."
378
- )
262
+ raise RuntimeError("Discrepancy principle failed.")
379
263
  damping_lower = damping
380
264
 
381
265
  it = 0
@@ -386,12 +270,6 @@ class LinearMinimumNormInversion(LinearInversion):
386
270
  _, chi_squared = get_model_for_damping(damping, data)
387
271
  damping_upper = damping
388
272
 
389
- if damping_lower is None or damping_upper is None:
390
- raise RuntimeError(
391
- "Failed to bracket the optimal damping parameter."
392
- )
393
-
394
- # Bracket search for the optimal damping
395
273
  model0 = None
396
274
  for _ in range(maxiter):
397
275
  damping = 0.5 * (damping_lower + damping_upper)
@@ -406,15 +284,12 @@ class LinearMinimumNormInversion(LinearInversion):
406
284
  damping_lower + damping_upper
407
285
  ):
408
286
  return model
409
-
410
287
  model0 = model
411
288
 
412
289
  raise RuntimeError("Bracketing search failed to converge.")
413
290
 
414
291
  return NonLinearOperator(self.data_space, self.model_space, mapping)
415
-
416
292
  else:
417
- # For error-free data, compute the minimum-norm solution via A*(A*A)^-1
418
293
  forward_operator = self.forward_problem.forward_operator
419
294
  normal_operator = forward_operator @ forward_operator.adjoint
420
295
  inverse_normal_operator = solver(normal_operator)
@@ -422,57 +297,20 @@ class LinearMinimumNormInversion(LinearInversion):
422
297
 
423
298
 
424
299
  class ConstrainedLinearMinimumNormInversion(LinearInversion):
425
- """
426
- Finds the minimum-norm solution subject to an affine subspace constraint
427
- using the discrepancy principle.
428
-
429
- Problem:
430
- Minimize ||u||
431
- Subject to u in A (Affine Subspace)
432
- And chi_squared(u, d) <= critical_value
433
-
434
- Method:
435
- We decompose the model as u = u_base + w, where u_base is the element
436
- of the affine subspace with the smallest norm (orthogonal to the tangent
437
- space), and w is a perturbation in the tangent space.
438
-
439
- Because u_base and w are orthogonal, ||u||^2 = ||u_base||^2 + ||w||^2.
440
- Minimizing ||u|| is therefore equivalent to minimizing ||w||.
441
-
442
- The problem reduces to finding the minimum norm w such that:
443
- || A(w) - (d - A(u_base)) ||_D^2 <= critical_value
444
-
445
- This is solved using the standard LinearMinimumNormInversion on a
446
- reduced forward problem.
447
- """
300
+ """Finds min-norm solution subject to affine subspace constraint."""
448
301
 
449
302
  def __init__(
450
- self,
451
- forward_problem: LinearForwardProblem,
452
- constraint: AffineSubspace,
303
+ self, forward_problem: LinearForwardProblem, constraint: AffineSubspace
453
304
  ) -> None:
454
- """
455
- Args:
456
- forward_problem: The original unconstrained forward problem.
457
- constraint: The affine subspace A where the solution must lie.
458
- """
459
305
  super().__init__(forward_problem)
460
306
  if self.forward_problem.data_error_measure_set:
461
307
  self.assert_inverse_data_covariance()
462
-
463
308
  self._constraint = constraint
464
-
465
- # 1. Compute the Orthogonal Base Vector (u_base)
466
- # u_base = (I - P) * translation
467
- # This is the vector in the affine space closest to the origin.
468
309
  self._u_base = constraint.domain.subtract(
469
310
  constraint.translation, constraint.projector(constraint.translation)
470
311
  )
471
312
 
472
- # 2. Construct Reduced Forward Problem
473
- # Operator: A_tilde = A @ P
474
313
  reduced_operator = forward_problem.forward_operator @ constraint.projector
475
-
476
314
  self._reduced_forward_problem = LinearForwardProblem(
477
315
  reduced_operator,
478
316
  data_error_measure=(
@@ -481,8 +319,6 @@ class ConstrainedLinearMinimumNormInversion(LinearInversion):
481
319
  else None
482
320
  ),
483
321
  )
484
-
485
- # 3. Initialize the internal unconstrained solver
486
322
  self._unconstrained_inversion = LinearMinimumNormInversion(
487
323
  self._reduced_forward_problem
488
324
  )
@@ -491,39 +327,22 @@ class ConstrainedLinearMinimumNormInversion(LinearInversion):
491
327
  self,
492
328
  solver: LinearSolver,
493
329
  /,
330
+ *,
331
+ preconditioner: Optional[Union[LinearOperator, LinearSolver]] = None,
494
332
  **kwargs,
495
333
  ) -> NonLinearOperator:
496
- """
497
- Returns an operator that maps data to the constrained minimum-norm solution.
498
-
499
- Args:
500
- solver: The linear solver for the reduced normal equations.
501
- **kwargs: Arguments passed to LinearMinimumNormInversion (e.g.,
502
- significance_level, rtol, maxiter).
503
-
504
- Returns:
505
- A NonLinearOperator mapping d -> u_constrained.
506
- """
507
-
508
- # Get the operator L_tilde such that w = L_tilde(d_tilde)
334
+ """Returns operator for constrained discrepancy principle inversion."""
509
335
  reduced_op = self._unconstrained_inversion.minimum_norm_operator(
510
- solver, **kwargs
336
+ solver, preconditioner=preconditioner, **kwargs
511
337
  )
512
338
 
513
- # Precompute A(u_base) to shift the data
514
339
  data_offset = self.forward_problem.forward_operator(self._u_base)
515
-
516
340
  domain = self.data_space
517
341
  codomain = self.model_space
518
342
 
519
343
  def mapping(d: Vector) -> Vector:
520
- # 1. Shift Data: d_tilde = d - A(u_base)
521
344
  d_tilde = domain.subtract(d, data_offset)
522
-
523
- # 2. Solve for perturbation w in the tangent space
524
345
  w = reduced_op(d_tilde)
525
-
526
- # 3. Reconstruct full model: u = u_base + w
527
346
  return codomain.add(self._u_base, w)
528
347
 
529
348
  return NonLinearOperator(domain, codomain, mapping)
@@ -524,3 +524,433 @@ class CGSolver(IterativeLinearSolver):
524
524
  self._callback(x)
525
525
 
526
526
  return x
527
+
528
+
529
class MinResSolver(IterativeLinearSolver):
    """
    A matrix-free implementation of the MINRES algorithm.

    Suitable for symmetric, possibly indefinite or singular linear systems.
    It minimizes the norm of the residual ||r|| in each step using the
    Hilbert space's native inner product.
    """

    def __init__(
        self,
        /,
        *,
        preconditioning_method: Optional[LinearSolver] = None,
        rtol: float = 1.0e-5,
        atol: float = 1.0e-8,
        maxiter: Optional[int] = None,
    ) -> None:
        """
        Args:
            preconditioning_method: Optional factory that builds a
                preconditioner for the operator being solved.
            rtol: Relative tolerance on the residual norm.
            atol: Absolute tolerance on the residual norm.
            maxiter: Maximum number of iterations. Defaults to
                ``10 * domain.dim`` when not set.
        """
        super().__init__(preconditioning_method=preconditioning_method)
        self._rtol = rtol
        self._atol = atol
        self._maxiter = maxiter

    def solve_linear_system(
        self,
        operator: LinearOperator,
        preconditioner: Optional[LinearOperator],
        y: Vector,
        x0: Optional[Vector],
    ) -> Vector:
        """
        Solves ``operator(x) = y`` using (optionally preconditioned) MINRES.

        Args:
            operator: The self-adjoint operator to invert.
            preconditioner: Optional operator approximating the inverse
                of ``operator``; applied to residuals each iteration.
            y: The right-hand side vector.
            x0: Optional initial guess; the domain's zero vector if None.

        Returns:
            The approximate solution vector.
        """
        domain = operator.domain

        # Initial setup using HilbertSpace methods
        x = domain.zero if x0 is None else domain.copy(x0)
        r = domain.subtract(y, operator(x))

        # Initial preconditioned residual: z = M^-1 r
        z = domain.copy(r) if preconditioner is None else preconditioner(r)

        # beta_1 = sqrt(r.T @ M^-1 @ r)
        gamma_curr = np.sqrt(domain.inner_product(r, z))
        if gamma_curr < self._atol:
            return x

        gamma_1 = gamma_curr  # Store initial residual norm for relative tolerance

        # Lanczos vectors: v_curr is M^-1-scaled basis vector
        v_prev = domain.zero
        v_curr = domain.multiply(1.0 / gamma_curr, z)

        # QR decomposition variables (Givens rotations)
        phi_bar = gamma_curr
        c_prev, s_prev = 1.0, 0.0
        c_curr, s_curr = 1.0, 0.0

        # Direction vectors for solution update
        w_prev = domain.zero
        w_curr = domain.zero

        maxiter = self._maxiter if self._maxiter is not None else 10 * domain.dim

        for k in range(maxiter):
            # --- Lanczos Step ---
            # Compute A * v_j (where v_j is already preconditioned)
            Av = operator(v_curr)
            alpha = domain.inner_product(v_curr, Av)

            # v_next = M^-1 * (A*v_j) - alpha*v_j - gamma_j*v_{j-1}
            # We apply M^-1 to the operator result to stay in the Krylov
            # space of M^-1 A.
            v_next = domain.copy(Av) if preconditioner is None else preconditioner(Av)
            domain.axpy(-alpha, v_curr, v_next)
            if k > 0:
                domain.axpy(-gamma_curr, v_prev, v_next)

            # beta_{j+1}. NOTE(review): in the preconditioned case the
            # textbook algorithm uses the M-norm here; this implementation
            # deliberately uses the native norm for consistency with the
            # unpreconditioned path — confirm against reference MINRES.
            gamma_next = domain.norm(v_next)

            # --- Givens Rotations (QR update of tridiagonal system) ---
            # Apply previous rotations to the current column of T
            delta_bar = c_curr * alpha - s_curr * c_prev * gamma_curr
            rho_1 = s_curr * alpha + c_curr * c_prev * gamma_curr
            rho_2 = s_prev * gamma_curr

            # Compute new rotation to eliminate gamma_next
            rho_3 = np.sqrt(delta_bar**2 + gamma_next**2)
            c_next = delta_bar / rho_3
            s_next = gamma_next / rho_3

            # Update RHS of the least-squares subproblem
            phi = c_next * phi_bar
            phi_bar = -s_next * phi_bar  # sign flip from the Givens rotation

            # Update search directions:
            # w_j = (v_j - rho_1*w_{j-1} - rho_2*w_{j-2}) / rho_3
            w_next = domain.copy(v_curr)
            if k > 0:
                domain.axpy(-rho_1, w_curr, w_next)
            if k > 1:
                domain.axpy(-rho_2, w_prev, w_next)
            domain.ax(1.0 / rho_3, w_next)

            # x = x + phi * w_j
            domain.axpy(phi, w_next, x)

            # Convergence check (abs for sign-flipping phi_bar)
            if abs(phi_bar) < self._rtol * gamma_1 or abs(phi_bar) < self._atol:
                break

            # Lanczos breakdown: the Krylov space is exhausted and the
            # next basis vector cannot be normalized.
            if gamma_next == 0.0:
                break

            # Shift variables for next iteration
            v_prev = v_curr
            v_curr = domain.multiply(1.0 / gamma_next, v_next)
            w_prev = w_curr
            w_curr = w_next
            c_prev, s_prev = c_curr, s_curr
            c_curr, s_curr = c_next, s_next
            gamma_curr = gamma_next

        return x
676
+
677
+
678
class BICGStabSolver(IterativeLinearSolver):
    """
    A matrix-free implementation of the BiCGStab algorithm.

    Suitable for non-symmetric linear systems Ax = y. It operates directly
    on Hilbert space vectors using native inner products and arithmetic.
    """

    def __init__(
        self,
        /,
        *,
        preconditioning_method: Optional[LinearSolver] = None,
        rtol: float = 1.0e-5,
        atol: float = 1.0e-8,
        maxiter: Optional[int] = None,
    ) -> None:
        """
        Args:
            preconditioning_method: Optional factory that builds a
                preconditioner for the operator being solved.
            rtol: Relative tolerance on the residual norm.
            atol: Absolute tolerance on the residual norm.
            maxiter: Maximum number of iterations. Defaults to
                ``10 * domain.dim`` when not set.
        """
        super().__init__(preconditioning_method=preconditioning_method)
        self._rtol = rtol
        self._atol = atol
        self._maxiter = maxiter

    def solve_linear_system(
        self,
        operator: LinearOperator,
        preconditioner: Optional[LinearOperator],
        y: Vector,
        x0: Optional[Vector],
    ) -> Vector:
        """
        Solves ``operator(x) = y`` using (optionally preconditioned) BiCGStab.

        Args:
            operator: The operator to invert.
            preconditioner: Optional operator approximating the inverse
                of ``operator``.
            y: The right-hand side vector.
            x0: Optional initial guess; the domain's zero vector if None.

        Returns:
            The approximate solution vector. On breakdown (rho or omega
            numerically zero) the current iterate is returned as-is.
        """
        domain = operator.domain

        x = domain.zero if x0 is None else domain.copy(x0)
        r = domain.subtract(y, operator(x))
        r_hat = domain.copy(r)  # Shadow residual

        rho = 1.0
        alpha = 1.0
        omega = 1.0

        v = domain.zero
        p = domain.zero

        r_norm_0 = domain.norm(r)
        if r_norm_0 < self._atol:
            return x

        maxiter = self._maxiter if self._maxiter is not None else 10 * domain.dim

        for k in range(maxiter):
            rho_prev = rho
            rho = domain.inner_product(r_hat, r)

            if abs(rho) < 1e-16:
                # Breakdown: residual has become orthogonal to the shadow
                # residual; further progress is impossible.
                break

            if k == 0:
                p = domain.copy(r)
            else:
                beta = (rho / rho_prev) * (alpha / omega)
                # p = r + beta * (p - omega * v)
                p_tmp = domain.subtract(p, domain.multiply(omega, v))
                p = domain.add(r, domain.multiply(beta, p_tmp))

            # Preconditioning step: ph = M^-1 p
            ph = domain.copy(p) if preconditioner is None else preconditioner(p)

            v = operator(ph)
            alpha = rho / domain.inner_product(r_hat, v)

            # s = r - alpha * v
            s = domain.subtract(r, domain.multiply(alpha, v))

            # Check norm of s for early convergence
            if domain.norm(s) < self._atol:
                domain.axpy(alpha, ph, x)
                break

            # Preconditioning step: sh = M^-1 s
            sh = domain.copy(s) if preconditioner is None else preconditioner(s)

            t = operator(sh)

            # omega = <t, s> / <t, t>
            omega = domain.inner_product(t, s) / domain.inner_product(t, t)

            # x = x + alpha * ph + omega * sh
            domain.axpy(alpha, ph, x)
            domain.axpy(omega, sh, x)

            # r = s - omega * t
            r = domain.subtract(s, domain.multiply(omega, t))

            # Compute the residual norm once per iteration (previously
            # evaluated twice in the convergence test).
            r_norm = domain.norm(r)
            if r_norm < self._rtol * r_norm_0 or r_norm < self._atol:
                break

            if abs(omega) < 1e-16:
                # Stabilization breakdown.
                break

        return x
778
+
779
+
780
class LSQRSolver(IterativeLinearSolver):
    """
    A matrix-free LSQR implementation supporting Tikhonov damping.

    Solves: minimize ||Ax - y||_2^2 + damping^2 * ||x||_2^2 via
    Golub-Kahan bidiagonalization with Givens rotations.
    """

    def __init__(
        self,
        /,
        *,
        rtol: float = 1.0e-5,
        atol: float = 1.0e-8,
        maxiter: Optional[int] = None,
    ) -> None:
        """
        Args:
            rtol: Relative convergence tolerance.
            atol: Absolute convergence tolerance.
            maxiter: Iteration cap; defaults to twice the larger of the
                domain and codomain dimensions.
        """
        super().__init__(preconditioning_method=None)
        self._rtol = rtol
        self._atol = atol
        self._maxiter = maxiter

    def solve_linear_system(
        self,
        operator: LinearOperator,
        preconditioner: Optional[LinearOperator],
        y: Vector,
        x0: Optional[Vector],
        damping: float = 0.0,
    ) -> Vector:
        """
        Runs LSQR for ``operator`` and right-hand side ``y``.

        Args:
            operator: The (possibly rectangular) operator.
            preconditioner: Ignored; LSQR is run unpreconditioned.
            y: The data vector in the codomain.
            x0: Optional starting model; defaults to zero.
            damping: Tikhonov damping weight applied to ||x||.

        Returns:
            The approximate damped least-squares solution.
        """
        dom = operator.domain
        cod = operator.codomain

        x = dom.zero if x0 is None else dom.copy(x0)
        u = cod.subtract(y, operator(x))

        beta = cod.norm(u)
        if beta > 0:
            u = cod.multiply(1.0 / beta, u)

        v = operator.adjoint(u)
        # 'alfa' is the bidiagonalization coefficient, distinct from the
        # damping weight.
        alfa = dom.norm(v)
        if alfa > 0:
            v = dom.multiply(1.0 / alfa, v)

        w = dom.copy(v)

        # State of the implicit QR factorization.
        phi_bar = beta
        rho_bar = alfa

        if self._maxiter is None:
            n_steps = 2 * max(dom.dim, cod.dim)
        else:
            n_steps = self._maxiter

        for _ in range(n_steps):
            # Golub-Kahan bidiagonalization: extend the u-basis...
            u = cod.subtract(operator(v), cod.multiply(alfa, u))
            beta = cod.norm(u)
            if beta > 0:
                u = cod.multiply(1.0 / beta, u)

            # ...then the v-basis.
            v = dom.subtract(operator.adjoint(u), dom.multiply(beta, v))
            alfa = dom.norm(v)
            if alfa > 0:
                v = dom.multiply(1.0 / alfa, v)

            # First rotation: fold the damping term into rho_bar.
            rho_hat = np.sqrt(rho_bar**2 + damping**2)
            c1 = rho_bar / rho_hat
            s1 = damping / rho_hat
            psi = c1 * phi_bar
            phi_bar = s1 * phi_bar

            # Second rotation: eliminate the subdiagonal beta.
            rho = np.sqrt(rho_hat**2 + beta**2)
            c = rho_hat / rho
            s = beta / rho
            theta = s * alfa
            rho_bar = -c * alfa
            phi = c * psi

            # Advance the solution along w, then update w.
            dom.axpy(phi / rho, w, x)
            w = dom.subtract(v, dom.multiply(theta / rho, w))

            if abs(phi_bar) < self._atol + self._rtol * beta:
                break

        return x
875
+
876
+
877
class FCGSolver(IterativeLinearSolver):
    """
    Flexible Conjugate Gradient (FCG) solver.

    Unlike standard CG, FCG tolerates a preconditioner whose action varies
    between iterations (e.g. an inner iterative solve) by enforcing the
    A-orthogonality of consecutive search directions explicitly.
    """

    def __init__(
        self,
        /,
        *,
        rtol: float = 1.0e-5,
        atol: float = 1.0e-8,
        maxiter: Optional[int] = None,
        preconditioning_method: Optional[LinearSolver] = None,
    ) -> None:
        """
        Args:
            rtol: Relative convergence tolerance (against ||y||).
            atol: Absolute convergence tolerance.
            maxiter: Iteration cap; defaults to ``2 * domain.dim``.
            preconditioning_method: Optional factory that builds a
                (possibly variable) preconditioner.
        """
        super().__init__(preconditioning_method=preconditioning_method)
        self._rtol = rtol
        self._atol = atol
        self._maxiter = maxiter

    def solve_linear_system(
        self,
        operator: LinearOperator,
        preconditioner: Optional[LinearOperator],
        y: Vector,
        x0: Optional[Vector],
    ) -> Vector:
        """
        Solves ``operator(x) = y`` with flexible preconditioned CG.

        Args:
            operator: The self-adjoint, positive-definite operator.
            preconditioner: Optional (possibly inexact) approximation of
                the operator's inverse; identity when None.
            y: The right-hand side vector.
            x0: Optional initial guess; defaults to zero.

        Returns:
            The approximate solution vector.
        """
        domain = operator.domain

        x = domain.zero if x0 is None else domain.copy(x0)

        # Initial residual r = y - Ax and reference norm for convergence.
        residual = domain.subtract(y, operator(x))
        rhs_norm = domain.norm(y)

        # A missing preconditioner is treated as the identity.
        mop = (
            domain.identity_operator() if preconditioner is None else preconditioner
        )

        # First preconditioned residual and search direction.
        z = mop(residual)
        direction = domain.copy(z)
        r_dot_z = domain.inner_product(residual, z)

        step_limit = 2 * domain.dim if self._maxiter is None else self._maxiter

        for _ in range(step_limit):
            a_dir = operator(direction)
            curvature = domain.inner_product(direction, a_dir)

            # Step length alpha = (r, z) / (p, Ap).
            step = r_dot_z / curvature

            # x <- x + alpha p ; r <- r - alpha Ap.
            domain.axpy(step, direction, x)
            domain.axpy(-step, a_dir, residual)

            if domain.norm(residual) < self._atol + self._rtol * rhs_norm:
                break

            # Flexible beta: chosen so the new direction is A-orthogonal
            # to the previous one even if the preconditioner changed.
            z_fresh = mop(residual)
            beta = -domain.inner_product(z_fresh, a_dir) / curvature

            # p <- z + beta p.
            direction = domain.add(z_fresh, domain.multiply(beta, direction))

            z = z_fresh
            r_dot_z = domain.inner_product(residual, z)

        return x
@@ -0,0 +1,140 @@
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, Optional
3
+ import numpy as np
4
+
5
+ from .linear_operators import LinearOperator, DiagonalSparseMatrixLinearOperator
6
+ from .linear_solvers import LinearSolver, IterativeLinearSolver
7
+ from .random_matrix import random_diagonal
8
+
9
+ if TYPE_CHECKING:
10
+ from .hilbert_space import Vector
11
+
12
+
13
class IdentityPreconditioningMethod(LinearSolver):
    """
    The do-nothing preconditioning strategy.

    Applying an instance to any operator simply yields the identity
    operator on that operator's domain, making this class a convenient
    baseline, benchmark, or default within the preconditioning framework.
    """

    def __call__(self, operator: LinearOperator) -> LinearOperator:
        """Return the identity operator on ``operator``'s domain."""
        domain = operator.domain
        return domain.identity_operator()
26
+
27
+
28
class JacobiPreconditioningMethod(LinearSolver):
    """
    A LinearSolver wrapper that generates a Jacobi (diagonal) preconditioner.

    The preconditioner is the reciprocal of the operator's Galerkin-matrix
    diagonal, which is either estimated stochastically or extracted exactly.
    """

    def __init__(
        self,
        num_samples: Optional[int] = 20,
        method: str = "variable",
        rtol: float = 1e-2,
        block_size: int = 10,
        parallel: bool = True,
        n_jobs: int = -1,
    ) -> None:
        """
        Args:
            num_samples: Number of probes for the stochastic diagonal
                estimator. If ``None``, the diagonal is extracted exactly.
            method: Estimation scheme forwarded to ``random_diagonal``.
            rtol: Relative tolerance for the stochastic estimator.
            block_size: Probe block size for the stochastic estimator.
            parallel: Whether to parallelise the diagonal computation.
            n_jobs: Number of parallel workers (-1 uses all available).
        """
        # Damping is removed: the operator passed to __call__ is already damped
        self._num_samples = num_samples
        self._method = method
        self._rtol = rtol
        self._block_size = block_size
        self._parallel = parallel
        self._n_jobs = n_jobs

    def __call__(self, operator: LinearOperator) -> LinearOperator:
        """
        Build the Jacobi preconditioner for ``operator``.

        Returns:
            A diagonal operator whose entries are the reciprocals of the
            operator's (Galerkin) diagonal; near-zero entries are mapped
            to 1.0 to avoid blow-up.
        """
        # Hutchinson's method or exact extraction on the damped normal operator
        if self._num_samples is not None:
            diag_values = random_diagonal(
                operator.matrix(galerkin=True),
                self._num_samples,
                method=self._method,
                rtol=self._rtol,
                block_size=self._block_size,
                parallel=self._parallel,
                n_jobs=self._n_jobs,
            )
        else:
            diag_values = operator.extract_diagonal(
                galerkin=True, parallel=self._parallel, n_jobs=self._n_jobs
            )

        # Invert only the well-conditioned entries. Using np.divide with a
        # `where` mask avoids the divide-by-zero RuntimeWarning that the
        # previous `np.where(mask, 1.0 / diag, 1.0)` formulation emitted:
        # np.where evaluates 1.0 / diag eagerly for *every* entry, including
        # the (near-)zero ones it then discards.
        diag_values = np.asarray(diag_values, dtype=float)
        invertible = np.abs(diag_values) > 1e-14
        inv_diag = np.ones_like(diag_values)
        np.divide(1.0, diag_values, out=inv_diag, where=invertible)

        return DiagonalSparseMatrixLinearOperator.from_diagonal_values(
            operator.domain, operator.domain, inv_diag, galerkin=True
        )
72
+
73
+
74
class SpectralPreconditioningMethod(LinearSolver):
    """
    A LinearSolver wrapper that generates a spectral (low-rank) preconditioner.
    """

    def __init__(
        self,
        damping: float,
        rank: int = 20,
        power: int = 2,
    ) -> None:
        """
        Args:
            damping: Tikhonov damping parameter; its square regularises the
                low-rank inverse.
            rank: Number of dominant eigenpairs to capture.
            power: Power-iteration count for the randomised eigensolver.
        """
        self._damping = damping
        self._rank = rank
        self._power = power

    def __call__(self, operator: LinearOperator) -> LinearOperator:
        """
        Generates a spectral preconditioner.

        Note: This assumes the operator provided is the data-misfit
        operator A*WA.
        """
        domain = operator.domain
        mu = self._damping**2

        # Randomised eigendecomposition captures the dominant modes.
        eigenvectors, eigen_diag = operator.random_eig(self._rank, power=self._power)
        eigenvalues = eigen_diag.extract_diagonal()
        filter_factors = eigenvalues / (eigenvalues + mu)

        def apply(vector: Vector) -> Vector:
            # Project onto the dominant subspace, filter, and lift back.
            coefficients = eigenvectors.adjoint(vector)
            lifted = eigenvectors(filter_factors * coefficients)
            # Apply (I - U D U*) / mu to the input vector.
            return domain.multiply(1.0 / mu, domain.subtract(vector, lifted))

        # The map is self-adjoint by construction, so it serves as its
        # own adjoint mapping.
        return LinearOperator(domain, domain, apply, adjoint_mapping=apply)
111
+
112
+
113
class IterativePreconditioningMethod(LinearSolver):
    """
    Wraps an iterative solver to act as a preconditioner.

    Pairing this with FCGSolver is recommended: the flexible outer
    iteration tolerates the inner solve varying between applications.
    """

    def __init__(
        self,
        inner_solver: IterativeLinearSolver,
        max_inner_iter: int = 5,
        rtol: float = 1e-1,
    ) -> None:
        """
        Args:
            inner_solver: The iterative solver to run as the preconditioner.
            max_inner_iter: Iteration cap imposed on the inner solver.
            rtol: Loose relative tolerance imposed on the inner solver.
        """
        self._inner_solver = inner_solver
        self._max_iter = max_inner_iter
        self._rtol = rtol

    def __call__(self, operator: LinearOperator) -> LinearOperator:
        """
        Returns a LinearOperator whose action is 'solve the system'.
        """
        # Loosen the inner solver's settings so each preconditioner
        # application stays cheap.
        # NOTE(review): this mutates the passed-in solver's private state
        # on every call, permanently reconfiguring that instance — confirm
        # callers do not reuse it elsewhere with its original settings.
        inner = self._inner_solver
        inner._maxiter = self._max_iter
        inner._rtol = self._rtol

        # Applying an iterative solver to an operator yields the
        # (approximate) inverse as a LinearOperator.
        return inner(operator)
pygeoinf/random_matrix.py CHANGED
@@ -182,11 +182,14 @@ def variable_rank_random_range(
182
182
  basis_vectors = np.hstack([basis_vectors, new_basis[:, :cols_to_add]])
183
183
 
184
184
  if not converged and basis_vectors.shape[1] >= max_rank:
185
- warnings.warn(
186
- f"Tolerance {rtol} not met before reaching max_rank={max_rank}. "
187
- "Result may be inaccurate. Consider increasing `max_rank` or `power`.",
188
- UserWarning,
189
- )
185
+ # If we reached the full dimension of the matrix,
186
+ # the result is exact, so no warning is needed.
187
+ if max_rank < min(m, n):
188
+ warnings.warn(
189
+ f"Tolerance {rtol} not met before reaching max_rank={max_rank}. "
190
+ "Result may be inaccurate. Consider increasing `max_rank` or `power`.",
191
+ UserWarning,
192
+ )
190
193
 
191
194
  return basis_vectors
192
195
 
@@ -8,7 +8,7 @@ import numpy as np
8
8
 
9
9
 
10
10
  class SHVectorConverter:
11
- """
11
+ r"""
12
12
  Handles conversion between pyshtools 3D coefficient arrays and 1D vectors.
13
13
 
14
14
  This class bridges the gap between the `pyshtools` 3D array format
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pygeoinf
3
- Version: 1.3.6
3
+ Version: 1.3.7
4
4
  Summary: A package for solving geophysical inference and inverse problems
5
5
  License: BSD-3-Clause
6
6
  License-File: LICENSE
@@ -1,4 +1,4 @@
1
- pygeoinf/__init__.py,sha256=IrG0WA80NuoXRJvoKNG7lKcht5ICBEQacjtuqBKQ2fU,3622
1
+ pygeoinf/__init__.py,sha256=OdtIgD3aF4LQ4UzponWblw4nQihRntqnni7m1DPdd5I,4076
2
2
  pygeoinf/auxiliary.py,sha256=lfoTt9ZH4y8SAV8dKZi5EWx1oF_JtxtBMSmlFYqJYfE,1610
3
3
  pygeoinf/backus_gilbert.py,sha256=eFi4blSwOCsg_NuH6WD4gcgjvzvu5g5WpWahGobSBdM,3694
4
4
  pygeoinf/checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -13,21 +13,22 @@ pygeoinf/inversion.py,sha256=RV0hG2bGnciWdja0oOPKPxnFhYzufqdj-mKYNr4JJ_o,6447
13
13
  pygeoinf/linear_bayesian.py,sha256=qzWEVaNe9AwG5GBmGHgVHswEMFKBWvOOJDlS95ahyxc,8877
14
14
  pygeoinf/linear_forms.py,sha256=mgZeDRegNKo8kviE68KrxkHR4gG9bf1RgsJz1MtDMCk,9181
15
15
  pygeoinf/linear_operators.py,sha256=Bn-uzwUXi2kkWZ7wc9Uhj3vBHtocN17hnzc_r7DAzTk,64530
16
- pygeoinf/linear_optimisation.py,sha256=vF1T3HE9rPOnXy3PU82-46dlvGwdAvsqUNXOx0o-KD8,20431
17
- pygeoinf/linear_solvers.py,sha256=v-7yjKsa67Ts5EcyJzCdpj-aF0qBrA-akq0kLe59DS4,16843
16
+ pygeoinf/linear_optimisation.py,sha256=RhO-1OsEDGnVHBlCtYyqp8jmW4GeGnGWGPRYPSc5GSg,13922
17
+ pygeoinf/linear_solvers.py,sha256=tYBp_ysePnOgqgKhMXhNHxLM8xi3awiwwdnKXHhmlNk,31071
18
18
  pygeoinf/nonlinear_forms.py,sha256=t7lk-Bha7Xdk9eiwXMmS0F47oTR6jW6qQ3HkgRGk54A,7012
19
19
  pygeoinf/nonlinear_operators.py,sha256=AtkDTQfGDzAnfFDIgiKfdk7uPEI-j_ZA3CNvY5A3U8w,7144
20
20
  pygeoinf/nonlinear_optimisation.py,sha256=skK1ikn9GrVYherD64Qt9WrEYHA2NAJ48msOu_J8Oig,7431
21
21
  pygeoinf/parallel.py,sha256=VVFvNHszy4wSa9LuErIsch4NAkLaZezhdN9YpRROBJo,2267
22
22
  pygeoinf/plot.py,sha256=Uw9PCdxymUiAkFF0BS0kUAZBRWL6sh89FJnSIxtp_2s,13664
23
- pygeoinf/random_matrix.py,sha256=71l6eAXQ2pRMleaz1lXud6O1F78ugKyp3vHcRBXhdwM,17661
23
+ pygeoinf/preconditioners.py,sha256=81PnzoQZzsf5mvXBYsHuadf1CdiGFlMbQn_tC2xPQ1k,4503
24
+ pygeoinf/random_matrix.py,sha256=-U_3-yrVos_86EfNy1flULsWY-Y9G9Yy1GKoSS2gn60,17828
24
25
  pygeoinf/subspaces.py,sha256=FJobjDRr8JG1zz-TjBsncJ1M5phQYwbttlaGuJz9ycU,13779
25
26
  pygeoinf/symmetric_space/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
27
  pygeoinf/symmetric_space/circle.py,sha256=GuwVmLdHGTMxMrZfyXIPP3pz_y971ntlD5pl42lKJZ0,18796
27
- pygeoinf/symmetric_space/sh_tools.py,sha256=k3bm2M-7-nprfKUwj1meIX3f8rpvkUPFM2moZFjvvog,3883
28
+ pygeoinf/symmetric_space/sh_tools.py,sha256=EDZm0YRZefvCfDjAKZatZMM3UqeTi-Npiflnc1E5slk,3884
28
29
  pygeoinf/symmetric_space/sphere.py,sha256=wYaZ2wqkQAHw9pn4vP_6LR9HAXSpzCncCh24xmSSC5A,28481
29
30
  pygeoinf/symmetric_space/symmetric_space.py,sha256=pEIZZYWsdegrYCwUs3bo86JTz3d2LsXFWdRYFa0syFs,17963
30
- pygeoinf-1.3.6.dist-info/METADATA,sha256=4HHENA4PIYGX3S-Vi1RnjeOIBLMWeyLpMa_z-V3fv-k,16482
31
- pygeoinf-1.3.6.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
32
- pygeoinf-1.3.6.dist-info/licenses/LICENSE,sha256=GrTQnKJemVi69FSbHprq60KN0OJGsOSR-joQoTq-oD8,1501
33
- pygeoinf-1.3.6.dist-info/RECORD,,
31
+ pygeoinf-1.3.7.dist-info/METADATA,sha256=rJugIyw0YNv6ccIFCnXNmISbwUclP-V8Zt1ZDLbqPpw,16482
32
+ pygeoinf-1.3.7.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
33
+ pygeoinf-1.3.7.dist-info/licenses/LICENSE,sha256=GrTQnKJemVi69FSbHprq60KN0OJGsOSR-joQoTq-oD8,1501
34
+ pygeoinf-1.3.7.dist-info/RECORD,,