pygeoinf 1.3.0__py3-none-any.whl → 1.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pygeoinf/auxiliary.py ADDED
@@ -0,0 +1,29 @@
1
+ from .gaussian_measure import GaussianMeasure
2
+
3
def empirical_data_error_measure(model_measure, forward_operator, n_samples=10, scale_factor=1.0):
    """
    Build an empirical Gaussian error measure on the data space.

    Samples are drawn from the push-forward of ``model_measure`` under
    ``forward_operator``; their (scaled) deviations from the sample mean
    define the covariance of the returned measure. Useful when a synthetic
    test needs a plausible data-error measure whose covariance matrix is
    easy to access.

    Args:
        model_measure: The measure on the model space used as a basis for the
            error measure (e.g., the model prior measure).
        forward_operator: Linear operator mapping from model space to data
            space (e.g., operator B).
        n_samples: Number of samples to generate for computing statistics
            (default: 10).
        scale_factor: Scaling factor for the standard deviations (default: 1.0).

    Returns:
        GaussianMeasure: Data error measure with empirically determined
        covariance.
    """
    # Push model-space samples through the forward operator into data space.
    pushforward = model_measure.affine_mapping(operator=forward_operator)
    data_samples = pushforward.samples(n_samples)
    data_space = forward_operator.codomain

    # Compute the sample mean in the data space.
    accumulated = data_space.zero
    for data_sample in data_samples:
        accumulated = data_space.add(accumulated, data_sample)
    sample_mean = data_space.multiply(1.0 / n_samples, accumulated)

    # Demean each sample and apply the requested scaling.
    centred_samples = [
        data_space.multiply(scale_factor, data_space.subtract(data_sample, sample_mean))
        for data_sample in data_samples
    ]

    # The zero-mean samples define the covariance of the returned measure.
    return GaussianMeasure.from_samples(data_space, centred_samples)
@@ -46,6 +46,23 @@ class HilbertSpaceAxiomChecks:
46
46
  if not norm_sum <= self.norm(x) + self.norm(y):
47
47
  raise AssertionError("Axiom failed: Triangle inequality")
48
48
 
49
+ def _check_riesz_representation(self, x, y):
50
+ """
51
+ Checks that the inner product is consistent with the Riesz map (to_dual).
52
+ This ensures that <x, y> == (R(x))(y).
53
+ """
54
+ # Value from the (potentially optimized) direct inner product method
55
+ direct_inner_product = self.inner_product(x, y)
56
+
57
+ # Value from the Riesz map definition
58
+ dual_x = self.to_dual(x)
59
+ riesz_inner_product = self.duality_product(dual_x, y)
60
+
61
+ if not np.isclose(direct_inner_product, riesz_inner_product):
62
+ raise AssertionError(
63
+ "Axiom failed: Inner product is not consistent with the Riesz map."
64
+ )
65
+
49
66
  def _check_mapping_identities(self, x):
50
67
  """Checks that component and dual mappings are self-consistent."""
51
68
  # from_components(to_components(x)) == x
@@ -176,6 +193,7 @@ class HilbertSpaceAxiomChecks:
176
193
  # Run all checks
177
194
  self._check_vector_space_axioms(x, y, a)
178
195
  self._check_inner_product_axioms(x, y, z, a, b)
196
+ # self._check_riesz_representation(x, y)
179
197
  self._check_mapping_identities(x)
180
198
  self._check_inplace_operations(x, y, a)
181
199
  self._check_copy(x)
@@ -12,6 +12,7 @@ from .nonlinear_operators import NonLinearOperatorAxiomChecks
12
12
 
13
13
  if TYPE_CHECKING:
14
14
  from ..hilbert_space import Vector
15
+ from ..linear_forms import LinearForm
15
16
 
16
17
 
17
18
  class LinearOperatorAxiomChecks(NonLinearOperatorAxiomChecks):
@@ -22,7 +23,15 @@ class LinearOperatorAxiomChecks(NonLinearOperatorAxiomChecks):
22
23
  checks for linearity and the adjoint identity.
23
24
  """
24
25
 
25
- def _check_linearity(self, x: Vector, y: Vector, a: float, b: float):
26
+ def _check_linearity(
27
+ self,
28
+ x: Vector,
29
+ y: Vector,
30
+ a: float,
31
+ b: float,
32
+ check_rtol: float = 1e-5,
33
+ check_atol: float = 1e-8,
34
+ ):
26
35
  """Verifies the linearity property: L(ax + by) = a*L(x) + b*L(y)"""
27
36
  ax_plus_by = self.domain.add(
28
37
  self.domain.multiply(a, x), self.domain.multiply(b, y)
@@ -38,72 +47,130 @@ class LinearOperatorAxiomChecks(NonLinearOperatorAxiomChecks):
38
47
  rhs_norm = self.codomain.norm(rhs)
39
48
  relative_error = diff_norm / (rhs_norm + 1e-12)
40
49
 
41
- if relative_error > 1e-9:
50
+ if relative_error > check_rtol and diff_norm > check_atol:
42
51
  raise AssertionError(
43
- f"Linearity check failed: L(ax+by) != aL(x)+bL(y). Relative error: {relative_error:.2e}"
52
+ f"Linearity check failed: L(ax+by) != aL(x)+bL(y). "
53
+ f"Relative error: {relative_error:.2e} (Tol: {check_rtol:.2e}), "
54
+ f"Absolute error: {diff_norm:.2e} (Tol: {check_atol:.2e})"
44
55
  )
45
56
 
46
- def _check_adjoint_definition(self, x: Vector, y: Vector):
57
+ def _check_adjoint_definition(
58
+ self,
59
+ x: Vector,
60
+ y: Vector,
61
+ check_rtol: float = 1e-5,
62
+ check_atol: float = 1e-8,
63
+ ):
47
64
  """Verifies the adjoint identity: <L(x), y> = <x, L*(y)>"""
48
65
  lhs = self.codomain.inner_product(self(x), y)
49
66
  rhs = self.domain.inner_product(x, self.adjoint(y))
50
67
 
51
- if not np.isclose(lhs, rhs):
68
+ if not np.isclose(lhs, rhs, rtol=check_rtol, atol=check_atol):
52
69
  raise AssertionError(
53
- f"Adjoint definition failed: <L(x),y> = {lhs:.4e}, but <x,L*(y)> = {rhs:.4e}"
70
+ f"Adjoint definition failed: <L(x),y> = {lhs:.4e}, "
71
+ f"but <x,L*(y)> = {rhs:.4e} (RelTol: {check_rtol:.2e}, AbsTol: {check_atol:.2e})"
54
72
  )
55
73
 
56
- def _check_algebraic_identities(self, op1, op2, x, y, a):
74
+ def _check_algebraic_identities(
75
+ self,
76
+ op1,
77
+ op2,
78
+ x,
79
+ y,
80
+ a,
81
+ check_rtol: float = 1e-5,
82
+ check_atol: float = 1e-8,
83
+ ):
57
84
  """
58
85
  Verifies the algebraic properties of the adjoint and dual operators.
59
86
  Requires a second compatible operator (op2).
60
87
  """
88
+
89
+ def _check_norm_based(res1, res2, space, axiom_name):
90
+ """Helper to perform norm-based comparison."""
91
+ diff_norm = space.norm(space.subtract(res1, res2))
92
+ norm_res2 = space.norm(res2)
93
+ if diff_norm > check_atol and diff_norm > check_rtol * (norm_res2 + 1e-12):
94
+ raise AssertionError(
95
+ f"Axiom failed: {axiom_name}. "
96
+ f"Absolute error: {diff_norm:.2e}, Relative error: {diff_norm / (norm_res2 + 1e-12):.2e}"
97
+ )
98
+
61
99
  # --- Adjoint Identities ---
62
100
  # (A+B)* = A* + B*
63
- op_sum_adj = (op1 + op2).adjoint
64
- adj_sum = op1.adjoint + op2.adjoint
65
- diff = op1.domain.subtract(op_sum_adj(y), adj_sum(y))
66
- if op1.domain.norm(diff) > 1e-9:
67
- raise AssertionError("Axiom failed: (A+B)* != A* + B*")
101
+ res1 = (op1 + op2).adjoint(y)
102
+ res2 = (op1.adjoint + op2.adjoint)(y)
103
+ _check_norm_based(res1, res2, op1.domain, "(A+B)* != A* + B*")
68
104
 
69
105
  # (a*A)* = a*A*
70
- op_scaled_adj = (a * op1).adjoint
71
- adj_scaled = a * op1.adjoint
72
- diff = op1.domain.subtract(op_scaled_adj(y), adj_scaled(y))
73
- if op1.domain.norm(diff) > 1e-9:
74
- raise AssertionError("Axiom failed: (a*A)* != a*A*")
106
+ res1 = (a * op1).adjoint(y)
107
+ res2 = (a * op1.adjoint)(y)
108
+ _check_norm_based(res1, res2, op1.domain, "(a*A)* != a*A*")
75
109
 
76
110
  # (A*)* = A
77
- op_adj_adj = op1.adjoint.adjoint
78
- diff = op1.codomain.subtract(op_adj_adj(x), op1(x))
79
- if op1.codomain.norm(diff) > 1e-9:
80
- raise AssertionError("Axiom failed: (A*)* != A")
111
+ res1 = op1.adjoint.adjoint(x)
112
+ res2 = op1(x)
113
+ _check_norm_based(res1, res2, op1.codomain, "(A*)* != A")
81
114
 
82
115
  # (A@B)* = B*@A*
83
116
  if op1.domain == op2.codomain:
84
- op_comp_adj = (op1 @ op2).adjoint
85
- adj_comp = op2.adjoint @ op1.adjoint
86
- diff = op2.domain.subtract(op_comp_adj(y), adj_comp(y))
87
- if op2.domain.norm(diff) > 1e-9:
88
- raise AssertionError("Axiom failed: (A@B)* != B*@A*")
117
+ res1 = (op1 @ op2).adjoint(y)
118
+ res2 = (op2.adjoint @ op1.adjoint)(y)
119
+ _check_norm_based(res1, res2, op2.domain, "(A@B)* != B*@A*")
89
120
 
90
121
  # --- Dual Identities ---
91
122
  # (A+B)' = A' + B'
92
123
  op_sum_dual = (op1 + op2).dual
93
124
  dual_sum = op1.dual + op2.dual
94
125
  y_dual = op1.codomain.to_dual(y)
95
- # The result of applying a dual operator is a LinearForm, which supports subtraction
96
- diff_dual = op_sum_dual(y_dual) - dual_sum(y_dual)
97
- if op1.domain.dual.norm(diff_dual) > 1e-9:
98
- raise AssertionError("Axiom failed: (A+B)' != A' + B'")
99
126
 
100
- def check(self, n_checks: int = 5, op2=None) -> None:
127
+ # The result of applying a dual operator is a LinearForm
128
+ res1_form: LinearForm = op_sum_dual(y_dual)
129
+ res2_form: LinearForm = dual_sum(y_dual)
130
+
131
+ # CORRECTED: Use LinearForm subtraction and dual space norm
132
+ # (This assumes LinearForm overloads __sub__)
133
+ try:
134
+ diff_form = res1_form - res2_form
135
+ diff_norm = op1.domain.dual.norm(diff_form)
136
+ norm_res2 = op1.domain.dual.norm(res2_form)
137
+
138
+ if diff_norm > check_atol and diff_norm > check_rtol * (norm_res2 + 1e-12):
139
+ raise AssertionError(
140
+ f"Axiom failed: (A+B)' != A' + B'. "
141
+ f"Absolute error: {diff_norm:.2e}, Relative error: {diff_norm / (norm_res2 + 1e-12):.2e}"
142
+ )
143
+ except (AttributeError, TypeError):
144
+ # Fallback if LinearForm doesn't support subtraction or norm
145
+ if not np.allclose(
146
+ res1_form.components,
147
+ res2_form.components,
148
+ rtol=check_rtol,
149
+ atol=check_atol,
150
+ ):
151
+ raise AssertionError(
152
+ "Axiom failed: (A+B)' != A' + B' (component check)."
153
+ )
154
+
155
+ def check(
156
+ self,
157
+ n_checks: int = 5,
158
+ op2=None,
159
+ check_rtol: float = 1e-5,
160
+ check_atol: float = 1e-8,
161
+ ) -> None:
101
162
  """
102
163
  Runs all checks for the LinearOperator, including non-linear checks
103
164
  and algebraic identities.
165
+
166
+ Args:
167
+ n_checks: The number of randomized trials to perform.
168
+ op2: An optional second operator for testing algebraic rules.
169
+ check_rtol: The relative tolerance for numerical checks.
170
+ check_atol: The absolute tolerance for numerical checks.
104
171
  """
105
172
  # First, run the parent (non-linear) checks from the base class
106
- super().check(n_checks, op2=op2)
173
+ super().check(n_checks, op2=op2, check_rtol=check_rtol, check_atol=check_atol)
107
174
 
108
175
  # Now, run the linear-specific checks
109
176
  print(
@@ -115,10 +182,16 @@ class LinearOperatorAxiomChecks(NonLinearOperatorAxiomChecks):
115
182
  y = self.codomain.random()
116
183
  a, b = np.random.randn(), np.random.randn()
117
184
 
118
- self._check_linearity(x1, x2, a, b)
119
- self._check_adjoint_definition(x1, y)
185
+ self._check_linearity(
186
+ x1, x2, a, b, check_rtol=check_rtol, check_atol=check_atol
187
+ )
188
+ self._check_adjoint_definition(
189
+ x1, y, check_rtol=check_rtol, check_atol=check_atol
190
+ )
120
191
 
121
192
  if op2:
122
- self._check_algebraic_identities(self, op2, x1, y, a)
193
+ self._check_algebraic_identities(
194
+ self, op2, x1, y, a, check_rtol=check_rtol, check_atol=check_atol
195
+ )
123
196
 
124
197
  print(f"✅ All {n_checks} linear operator checks passed successfully.")
@@ -13,7 +13,9 @@ if TYPE_CHECKING:
13
13
  class NonLinearOperatorAxiomChecks:
14
14
  """A mixin for checking the properties of a NonLinearOperator."""
15
15
 
16
- def _check_derivative_finite_difference(self, x, v, h=1e-7):
16
+ def _check_derivative_finite_difference(
17
+ self, x, v, h=1e-7, check_rtol: float = 1e-5, check_atol: float = 1e-8
18
+ ):
17
19
  """
18
20
  Verifies the derivative using the finite difference formula:
19
21
  D[F](x) @ v ≈ (F(x + h*v) - F(x)) / h
@@ -49,12 +51,20 @@ class NonLinearOperatorAxiomChecks:
49
51
  analytic_norm = self.codomain.norm(analytic_result)
50
52
  relative_error = diff_norm / (analytic_norm + 1e-12)
51
53
 
52
- if relative_error > 1e-4:
54
+ # The finite difference method itself has an error, so we use
55
+ # the max of the requested rtol and a default 1e-4.
56
+ effective_rtol = max(check_rtol, 1e-4)
57
+
58
+ if relative_error > effective_rtol and diff_norm > check_atol:
53
59
  raise AssertionError(
54
- f"Finite difference check failed. Relative error: {relative_error:.2e}"
60
+ f"Finite difference check failed. Relative error: {relative_error:.2e} "
61
+ f"(Tolerance: {effective_rtol:.2e}), "
62
+ f"Absolute error: {diff_norm:.2e} (Tol: {check_atol:.2e})"
55
63
  )
56
64
 
57
- def _check_add_derivative(self, op1, op2, x, v):
65
+ def _check_add_derivative(
66
+ self, op1, op2, x, v, check_rtol: float = 1e-5, check_atol: float = 1e-8
67
+ ):
58
68
  """Verifies the sum rule for derivatives: (F+G)' = F' + G'"""
59
69
  if not (op1.has_derivative and op2.has_derivative):
60
70
  return # Skip if derivatives aren't defined
@@ -70,11 +80,19 @@ class NonLinearOperatorAxiomChecks:
70
80
  res1 = derivative_of_sum(v)
71
81
  res2 = sum_of_derivatives(v)
72
82
 
73
- diff_norm = self.codomain.norm(self.codomain.subtract(res1, res2))
74
- if diff_norm > 1e-9:
75
- raise AssertionError("Axiom failed: Derivative of sum is incorrect.")
83
+ # CORRECTED: Use norm-based comparison, not np.allclose
84
+ diff_norm = op1.codomain.norm(op1.codomain.subtract(res1, res2))
85
+ norm_res2 = op1.codomain.norm(res2)
86
+
87
+ if diff_norm > check_atol and diff_norm > check_rtol * (norm_res2 + 1e-12):
88
+ raise AssertionError(
89
+ f"Axiom failed: Derivative of sum is incorrect. "
90
+ f"Absolute error: {diff_norm:.2e}, Relative error: {diff_norm / (norm_res2 + 1e-12):.2e}"
91
+ )
76
92
 
77
- def _check_scalar_mul_derivative(self, op, x, v, a):
93
+ def _check_scalar_mul_derivative(
94
+ self, op, x, v, a, check_rtol: float = 1e-5, check_atol: float = 1e-8
95
+ ):
78
96
  """Verifies the scalar multiple rule: (a*F)' = a*F'"""
79
97
  if not op.has_derivative:
80
98
  return
@@ -90,13 +108,19 @@ class NonLinearOperatorAxiomChecks:
90
108
  res1 = derivative_of_scaled(v)
91
109
  res2 = scaled_derivative(v)
92
110
 
93
- diff_norm = self.codomain.norm(self.codomain.subtract(res1, res2))
94
- if diff_norm > 1e-9:
111
+ # CORRECTED: Use norm-based comparison
112
+ diff_norm = op.codomain.norm(op.codomain.subtract(res1, res2))
113
+ norm_res2 = op.codomain.norm(res2)
114
+
115
+ if diff_norm > check_atol and diff_norm > check_rtol * (norm_res2 + 1e-12):
95
116
  raise AssertionError(
96
- "Axiom failed: Derivative of scalar multiple is incorrect."
117
+ f"Axiom failed: Derivative of scalar multiple is incorrect. "
118
+ f"Absolute error: {diff_norm:.2e}, Relative error: {diff_norm / (norm_res2 + 1e-12):.2e}"
97
119
  )
98
120
 
99
- def _check_matmul_derivative(self, op1, op2, x, v):
121
+ def _check_matmul_derivative(
122
+ self, op1, op2, x, v, check_rtol: float = 1e-5, check_atol: float = 1e-8
123
+ ):
100
124
  """Verifies the chain rule for derivatives: (F o G)'(x) = F'(G(x)) @ G'(x)"""
101
125
  if not (op1.has_derivative and op2.has_derivative):
102
126
  return
@@ -115,13 +139,23 @@ class NonLinearOperatorAxiomChecks:
115
139
  res1 = derivative_of_composed(v)
116
140
  res2 = chain_rule_derivative(v)
117
141
 
142
+ # CORRECTED: Use norm-based comparison
118
143
  diff_norm = op1.codomain.norm(op1.codomain.subtract(res1, res2))
119
- if diff_norm > 1e-9:
144
+ norm_res2 = op1.codomain.norm(res2)
145
+
146
+ if diff_norm > check_atol and diff_norm > check_rtol * (norm_res2 + 1e-12):
120
147
  raise AssertionError(
121
- "Axiom failed: Chain rule for derivatives is incorrect."
148
+ f"Axiom failed: Chain rule for derivatives is incorrect. "
149
+ f"Absolute error: {diff_norm:.2e}, Relative error: {diff_norm / (norm_res2 + 1e-12):.2e}"
122
150
  )
123
151
 
124
- def check(self, n_checks: int = 5, op2=None) -> None:
152
+ def check(
153
+ self,
154
+ n_checks: int = 5,
155
+ op2=None,
156
+ check_rtol: float = 1e-5,
157
+ check_atol: float = 1e-8,
158
+ ) -> None:
125
159
  """
126
160
  Runs randomized checks to validate the operator's derivative and
127
161
  its algebraic properties.
@@ -129,6 +163,8 @@ class NonLinearOperatorAxiomChecks:
129
163
  Args:
130
164
  n_checks: The number of randomized trials to perform.
131
165
  op2: An optional second operator for testing algebraic rules.
166
+ check_rtol: The relative tolerance for numerical checks.
167
+ check_atol: The absolute tolerance for numerical checks.
132
168
  """
133
169
  print(
134
170
  f"\nRunning {n_checks} randomized checks for {self.__class__.__name__}..."
@@ -143,12 +179,20 @@ class NonLinearOperatorAxiomChecks:
143
179
  v = self.domain.random()
144
180
 
145
181
  # Original check
146
- self._check_derivative_finite_difference(x, v)
182
+ self._check_derivative_finite_difference(
183
+ x, v, check_rtol=check_rtol, check_atol=check_atol
184
+ )
147
185
 
148
186
  # New algebraic checks
149
- self._check_scalar_mul_derivative(self, x, v, a)
187
+ self._check_scalar_mul_derivative(
188
+ self, x, v, a, check_rtol=check_rtol, check_atol=check_atol
189
+ )
150
190
  if op2:
151
- self._check_add_derivative(self, op2, x, v)
152
- self._check_matmul_derivative(self, op2, x, v)
191
+ self._check_add_derivative(
192
+ self, op2, x, v, check_rtol=check_rtol, check_atol=check_atol
193
+ )
194
+ self._check_matmul_derivative(
195
+ self, op2, x, v, check_rtol=check_rtol, check_atol=check_atol
196
+ )
153
197
 
154
198
  print(f"✅ All {n_checks} non-linear operator checks passed successfully.")
@@ -21,6 +21,7 @@ Key Features
21
21
 
22
22
  from __future__ import annotations
23
23
  from typing import Callable, Optional, Any, List, TYPE_CHECKING
24
+ import warnings
24
25
 
25
26
  import numpy as np
26
27
  from scipy.linalg import eigh
@@ -197,6 +198,7 @@ class GaussianMeasure:
197
198
  /,
198
199
  *,
199
200
  expectation: Vector = None,
201
+ rtol: float = 1e-10,
200
202
  ) -> GaussianMeasure:
201
203
  """
202
204
  Creates a Gaussian measure from a dense covariance matrix.
@@ -205,15 +207,36 @@ class GaussianMeasure:
205
207
  the covariance operator. This method computes a Cholesky-like
206
208
  decomposition of the matrix to create a `covariance_factor`.
207
209
 
210
+ It includes a check to handle numerical precision issues, allowing for
211
+ eigenvalues that are slightly negative within a relative tolerance.
212
+
208
213
  Args:
209
214
  domain: The Hilbert space the measure is defined on.
210
215
  covariance_matrix: The dense covariance matrix.
211
216
  expectation: The expectation (mean) of the measure.
217
+ rtol: The relative tolerance used to check for negative eigenvalues.
212
218
  """
213
219
 
214
220
  eigenvalues, U = eigh(covariance_matrix)
215
- if any(val < 0 for val in eigenvalues):
216
- raise ValueError("Covariance matrix is not non-negative")
221
+
222
+ if np.any(eigenvalues < 0):
223
+ max_eig = np.max(np.abs(eigenvalues))
224
+ min_eig = np.min(eigenvalues)
225
+
226
+ # Check if the most negative eigenvalue is outside the tolerance
227
+ if min_eig < -rtol * max_eig:
228
+ raise ValueError(
229
+ "Covariance matrix has significantly negative eigenvalues, "
230
+ "indicating it is not positive semi-definite."
231
+ )
232
+ else:
233
+ # If negative eigenvalues are within tolerance, warn and correct
234
+ warnings.warn(
235
+ "Covariance matrix has small negative eigenvalues due to "
236
+ "numerical error. Clipping them to zero.",
237
+ UserWarning,
238
+ )
239
+ eigenvalues[eigenvalues < 0] = 0
217
240
 
218
241
  values = np.sqrt(eigenvalues)
219
242
  D = diags([values], [0])
@@ -466,24 +489,57 @@ class GaussianMeasure:
466
489
  sample=new_sample if self.sample_set else None,
467
490
  )
468
491
 
469
- def as_multivariate_normal(self) -> multivariate_normal:
492
+ def as_multivariate_normal(
493
+ self, /, *, parallel: bool = False, n_jobs: int = -1
494
+ ) -> multivariate_normal:
470
495
  """
471
496
  Returns the measure as a `scipy.stats.multivariate_normal` object.
472
497
 
473
498
  This is only possible if the measure is defined on a EuclideanSpace.
474
- """
475
499
 
500
+ If the covariance matrix has small negative eigenvalues due to numerical
501
+ precision issues, this method attempts to correct them by setting them
502
+ to zero.
503
+
504
+ Args:
505
+ parallel (bool, optional): If `True`, computes the dense covariance
506
+ matrix in parallel. Defaults to `False`.
507
+ n_jobs (int, optional): The number of parallel jobs to use. `-1`
508
+ uses all available cores. Defaults to -1.
509
+ """
476
510
  if not isinstance(self.domain, EuclideanSpace):
477
511
  raise NotImplementedError(
478
512
  "Method only defined for measures on Euclidean space."
479
513
  )
480
514
 
481
- return multivariate_normal(
482
- mean=self.expectation,
483
- cov=self.covariance.matrix(dense=True),
484
- allow_singular=True,
515
+ mean_vector = self.expectation
516
+
517
+ # Pass the parallelization arguments directly to the matrix creation method
518
+ cov_matrix = self.covariance.matrix(
519
+ dense=True, parallel=parallel, n_jobs=n_jobs
485
520
  )
486
521
 
522
+ try:
523
+ # First, try to create the distribution directly.
524
+ return multivariate_normal(
525
+ mean=mean_vector, cov=cov_matrix, allow_singular=True
526
+ )
527
+ except ValueError:
528
+ # If it fails, clean the covariance matrix and try again.
529
+ warnings.warn(
530
+ "Covariance matrix is not positive semi-definite due to "
531
+ "numerical errors. Setting negative eigenvalues to zero.",
532
+ UserWarning,
533
+ )
534
+
535
+ eigenvalues, eigenvectors = eigh(cov_matrix)
536
+ eigenvalues[eigenvalues < 0] = 0
537
+ cleaned_cov = eigenvectors @ diags(eigenvalues) @ eigenvectors.T
538
+
539
+ return multivariate_normal(
540
+ mean=mean_vector, cov=cleaned_cov, allow_singular=True
541
+ )
542
+
487
543
  def low_rank_approximation(
488
544
  self,
489
545
  size_estimate: int,
pygeoinf/hilbert_space.py CHANGED
@@ -166,6 +166,22 @@ class HilbertSpace(ABC, HilbertSpaceAxiomChecks):
166
166
  """
167
167
  return isinstance(x, type(self.zero))
168
168
 
169
+ def inner_product(self, x1: Vector, x2: Vector) -> float:
170
+ """
171
+ Computes the inner product of two vectors, `(x1, x2)`.
172
+
173
+ This is defined via the duality product as `<R(x1), x2>`, where `R` is
174
+ the Riesz map (`to_dual`).
175
+
176
+ Args:
177
+ x1: The first vector.
178
+ x2: The second vector.
179
+
180
+ Returns:
181
+ The inner product as a float.
182
+ """
183
+ return self.duality_product(self.to_dual(x1), x2)
184
+
169
185
  def duality_product(self, xp: LinearForm, x: Vector) -> float:
170
186
  """
171
187
  Computes the duality product <xp, x>.
@@ -293,23 +309,6 @@ class HilbertSpace(ABC, HilbertSpaceAxiomChecks):
293
309
 
294
310
  return LinearOperator.self_dual(self, self.to_dual)
295
311
 
296
- @final
297
- def inner_product(self, x1: Vector, x2: Vector) -> float:
298
- """
299
- Computes the inner product of two vectors, `(x1, x2)`.
300
-
301
- This is defined via the duality product as `<R(x1), x2>`, where `R` is
302
- the Riesz map (`to_dual`).
303
-
304
- Args:
305
- x1: The first vector.
306
- x2: The second vector.
307
-
308
- Returns:
309
- The inner product as a float.
310
- """
311
- return self.duality_product(self.to_dual(x1), x2)
312
-
313
312
  @final
314
313
  def squared_norm(self, x: Vector) -> float:
315
314
  """
@@ -588,6 +587,15 @@ class EuclideanSpace(HilbertSpace):
588
587
  """Maps a `LinearForm` back to a vector via its components."""
589
588
  return self.dual.to_components(xp)
590
589
 
590
+ def inner_product(self, x1: np.ndarray, x2: np.ndarray) -> float:
591
+ """
592
+ Computes the inner product of two vectors.
593
+
594
+ Notes:
595
+ Default implementation overrident for efficiency.
596
+ """
597
+ return np.dot(x1, x2)
598
+
591
599
  def __eq__(self, other: object):
592
600
  if not isinstance(other, EuclideanSpace):
593
601
  return NotImplemented
@@ -597,7 +605,7 @@ class EuclideanSpace(HilbertSpace):
597
605
  """
598
606
  Checks if an object is a valid element of the space.
599
607
  """
600
- return isinstance(x, np.ndarray) and len(x) == self.dim
608
+ return isinstance(x, np.ndarray) and x.shape == (self.dim,)
601
609
 
602
610
 
603
611
  class MassWeightedHilbertSpace(HilbertSpace):
@@ -678,6 +686,15 @@ class MassWeightedHilbertSpace(HilbertSpace):
678
686
  x = self.underlying_space.from_dual(xp)
679
687
  return self._inverse_mass_operator(x)
680
688
 
689
+ def inner_product(self, x1: Vector, x2: Vector) -> float:
690
+ """
691
+ Computes the inner product of two vectors.
692
+
693
+ Notes:
694
+ Default implementation overrident for efficiency.
695
+ """
696
+ return self._underlying_space.inner_product(self._mass_operator(x1), x2)
697
+
681
698
  def __eq__(self, other: object) -> bool:
682
699
  """
683
700
  Checks for equality with another MassWeightedHilbertSpace.
pygeoinf/plot.py ADDED
@@ -0,0 +1,350 @@
1
+ import matplotlib.pyplot as plt
2
+ import matplotlib.colors as colors
3
+ import numpy as np
4
+ import scipy.stats as stats
5
+ from typing import Union, List, Optional
6
+
7
+ def plot_1d_distributions(
8
+ posterior_measures: Union[object, List[object]],
9
+ prior_measures: Optional[Union[object, List[object]]] = None,
10
+ true_value: Optional[float] = None,
11
+ xlabel: str = "Property Value",
12
+ title: str = "Prior and Posterior Probability Distributions",
13
+ figsize: tuple = (12, 7),
14
+ show_plot: bool = True
15
+ ):
16
+ """
17
+ Plot 1D probability distributions for prior and posterior measures using dual y-axes.
18
+
19
+ Args:
20
+ posterior_measures: Single measure or list of measures for posterior distributions
21
+ prior_measures: Single measure or list of measures for prior distributions (optional)
22
+ true_value: True value to mark with a vertical line (optional)
23
+ xlabel: Label for x-axis
24
+ title: Title for the plot
25
+ figsize: Figure size tuple
26
+ show_plot: Whether to display the plot
27
+
28
+ Returns:
29
+ fig, (ax1, ax2): Figure and axes objects
30
+ """
31
+
32
+ # Convert single measures to lists for uniform handling
33
+ if not isinstance(posterior_measures, list):
34
+ posterior_measures = [posterior_measures]
35
+
36
+ if prior_measures is not None and not isinstance(prior_measures, list):
37
+ prior_measures = [prior_measures]
38
+
39
+ # Define color sequences
40
+ prior_colors = ['green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan']
41
+ posterior_colors = ['blue', 'red', 'darkgreen', 'orange', 'purple', 'brown', 'pink', 'gray']
42
+
43
+ # Calculate statistics for all distributions
44
+ posterior_stats = []
45
+ for measure in posterior_measures:
46
+ if hasattr(measure, 'expectation') and hasattr(measure, 'covariance'):
47
+ # For pygeoinf measures
48
+ mean = measure.expectation[0]
49
+ var = measure.covariance.matrix(dense=True)[0, 0]
50
+ std = np.sqrt(var)
51
+ else:
52
+ # For scipy distributions
53
+ mean = measure.mean[0]
54
+ std = np.sqrt(measure.cov[0, 0])
55
+ posterior_stats.append((mean, std))
56
+
57
+ prior_stats = []
58
+ if prior_measures is not None:
59
+ for measure in prior_measures:
60
+ if hasattr(measure, 'expectation') and hasattr(measure, 'covariance'):
61
+ # For pygeoinf measures
62
+ mean = measure.expectation[0]
63
+ var = measure.covariance.matrix(dense=True)[0, 0]
64
+ std = np.sqrt(var)
65
+ else:
66
+ # For scipy distributions
67
+ mean = measure.mean[0]
68
+ std = np.sqrt(measure.cov[0, 0])
69
+ prior_stats.append((mean, std))
70
+
71
+ # Determine plot range to include all distributions
72
+ all_means = [stat[0] for stat in posterior_stats]
73
+ all_stds = [stat[1] for stat in posterior_stats]
74
+
75
+ if prior_measures is not None:
76
+ all_means.extend([stat[0] for stat in prior_stats])
77
+ all_stds.extend([stat[1] for stat in prior_stats])
78
+
79
+ if true_value is not None:
80
+ all_means.append(true_value)
81
+ all_stds.append(0) # No std for true value
82
+
83
+ # Calculate x-axis range (6 sigma coverage)
84
+ x_min = min([mean - 6 * std for mean, std in zip(all_means, all_stds) if std > 0])
85
+ x_max = max([mean + 6 * std for mean, std in zip(all_means, all_stds) if std > 0])
86
+
87
+ # Add some padding around true value if needed
88
+ if true_value is not None:
89
+ range_size = x_max - x_min
90
+ x_min = min(x_min, true_value - 0.1 * range_size)
91
+ x_max = max(x_max, true_value + 0.1 * range_size)
92
+
93
+ x_axis = np.linspace(x_min, x_max, 1000)
94
+
95
+ # Create the plot with two y-axes
96
+ fig, ax1 = plt.subplots(figsize=figsize)
97
+
98
+ # Plot priors on the first axis (left y-axis) if provided
99
+ if prior_measures is not None:
100
+ color1 = prior_colors[0] if len(prior_measures) > 0 else 'green'
101
+ ax1.set_xlabel(xlabel)
102
+ ax1.set_ylabel('Prior Probability Density', color=color1)
103
+
104
+ for i, (measure, (mean, std)) in enumerate(zip(prior_measures, prior_stats)):
105
+ color = prior_colors[i % len(prior_colors)]
106
+
107
+ # Calculate PDF values using scipy.stats
108
+ pdf_values = stats.norm.pdf(x_axis, loc=mean, scale=std)
109
+
110
+ # Determine label
111
+ if len(prior_measures) == 1:
112
+ label = f'Prior PDF (Mean: {mean:.5f})'
113
+ else:
114
+ label = f'Prior {i+1} (Mean: {mean:.5f})'
115
+
116
+ ax1.plot(x_axis, pdf_values, color=color, lw=2, linestyle=':', label=label)
117
+ ax1.fill_between(x_axis, pdf_values, color=color, alpha=0.15)
118
+
119
+ ax1.tick_params(axis='y', labelcolor=color1)
120
+ ax1.grid(True, linestyle='--')
121
+ else:
122
+ # If no priors, use the left axis for posteriors
123
+ ax1.set_xlabel(xlabel)
124
+ ax1.set_ylabel('Probability Density')
125
+ ax1.grid(True, linestyle='--')
126
+
127
+ # Create second y-axis for posteriors (or use first if no priors)
128
+ if prior_measures is not None:
129
+ ax2 = ax1.twinx()
130
+ color2 = posterior_colors[0] if len(posterior_measures) > 0 else 'blue'
131
+ ax2.set_ylabel('Posterior Probability Density', color=color2)
132
+ ax2.tick_params(axis='y', labelcolor=color2)
133
+ ax2.grid(False)
134
+ plot_ax = ax2
135
+ else:
136
+ plot_ax = ax1
137
+ color2 = posterior_colors[0] if len(posterior_measures) > 0 else 'blue'
138
+
139
+ # Plot posteriors
140
+ for i, (measure, (mean, std)) in enumerate(zip(posterior_measures, posterior_stats)):
141
+ color = posterior_colors[i % len(posterior_colors)]
142
+
143
+ # Calculate PDF values using scipy.stats
144
+ pdf_values = stats.norm.pdf(x_axis, loc=mean, scale=std)
145
+
146
+ # Determine label
147
+ if len(posterior_measures) == 1:
148
+ label = f'Posterior PDF (Mean: {mean:.5f})'
149
+ else:
150
+ label = f'Posterior {i+1} (Mean: {mean:.5f})'
151
+
152
+ plot_ax.plot(x_axis, pdf_values, color=color, lw=2, label=label)
153
+ plot_ax.fill_between(x_axis, pdf_values, color=color, alpha=0.2)
154
+
155
+ # Plot true value if provided
156
+ if true_value is not None:
157
+ ax1.axvline(true_value, color='black', linestyle='-', lw=2,
158
+ label=f'True Value: {true_value:.5f}')
159
+
160
+ # Create combined legend
161
+ handles1, labels1 = ax1.get_legend_handles_labels()
162
+
163
+ if prior_measures is not None:
164
+ handles2, labels2 = ax2.get_legend_handles_labels()
165
+ all_handles = handles1 + handles2
166
+ all_labels = [h.get_label() for h in all_handles]
167
+ else:
168
+ all_handles = handles1
169
+ all_labels = [h.get_label() for h in all_handles]
170
+
171
+ fig.legend(all_handles, all_labels, loc='upper right', bbox_to_anchor=(0.9, 0.9))
172
+ fig.suptitle(title, fontsize=16)
173
+ fig.tight_layout(rect=[0, 0, 1, 0.96])
174
+
175
+ if show_plot:
176
+ plt.show()
177
+
178
+ if prior_measures is not None:
179
+ return fig, (ax1, ax2)
180
+ else:
181
+ return fig, ax1
182
+
183
+
184
def plot_corner_distributions(
    posterior_measure: object,
    true_values: Optional[Union[List[float], np.ndarray]] = None,
    labels: Optional[List[str]] = None,
    title: str = "Joint Posterior Distribution",
    figsize: Optional[tuple] = None,
    show_plot: bool = True,
    include_sigma_contours: bool = True,
    colormap: str = "Blues"
):
    """
    Create a corner plot for multi-dimensional posterior distributions.

    The diagonal shows the 1-D Gaussian marginal of each dimension; the lower
    triangle shows the 2-D joint Gaussian of each pair of dimensions; the
    upper triangle is left blank.

    Args:
        posterior_measure: Multi-dimensional posterior measure (pygeoinf
            object exposing `expectation` and `covariance`).
        true_values: True values for each dimension (optional).
        labels: Labels for each dimension (optional).
        title: Title for the plot.
        figsize: Figure size tuple (if None, calculated based on dimensions).
        show_plot: Whether to display the plot.
        include_sigma_contours: Whether to include 1-sigma contour lines.
        colormap: Colormap for 2D plots.

    Returns:
        fig, axes: Figure and 2-D axes array.

    Raises:
        ValueError: If the measure does not expose first and second moments.
    """
    # Guard clause: only measures exposing first/second moments are supported.
    if not (hasattr(posterior_measure, "expectation")
            and hasattr(posterior_measure, "covariance")):
        raise ValueError("posterior_measure must have 'expectation' and 'covariance' attributes")

    mean_posterior = posterior_measure.expectation
    cov_posterior = posterior_measure.covariance.matrix(dense=True, parallel=True)
    n_dims = len(mean_posterior)

    if labels is None:
        labels = [f"Dimension {d+1}" for d in range(n_dims)]
    if figsize is None:
        figsize = (3 * n_dims, 3 * n_dims)

    fig, axes = plt.subplots(n_dims, n_dims, figsize=figsize)
    fig.suptitle(title, fontsize=16)

    # Normalise `axes` to a 2-D array regardless of dimensionality, so the
    # indexing below is uniform.
    if n_dims == 1:
        axes = np.array([[axes]])
    elif n_dims == 2:
        axes = axes.reshape(2, 2)

    # Last pcolormesh drawn; reused as the mappable for the shared colorbar.
    pcm = None

    for row in range(n_dims):
        for col in range(n_dims):
            ax = axes[row, col]

            if row == col:
                # Diagonal: 1-D Gaussian marginal for this dimension.
                mu = mean_posterior[row]
                sigma = np.sqrt(cov_posterior[row, row])

                grid = np.linspace(mu - 4 * sigma, mu + 4 * sigma, 200)
                density = stats.norm.pdf(grid, mu, sigma)

                ax.plot(grid, density, "darkblue", label="Posterior PDF")
                ax.fill_between(grid, density, color="lightblue", alpha=0.6)

                if true_values is not None:
                    true_val = true_values[row]
                    ax.axvline(true_val, color="black", linestyle="-",
                               label=f"True: {true_val:.2f}")

                ax.set_xlabel(labels[row])
                ax.set_ylabel("Density" if row == 0 else "")
                ax.set_yticklabels([])

            elif row > col:
                # Lower triangle: 2-D joint Gaussian of dimensions (col, row).
                mean_2d = np.array([mean_posterior[col], mean_posterior[row]])
                cov_2d = np.array([
                    [cov_posterior[col, col], cov_posterior[col, row]],
                    [cov_posterior[row, col], cov_posterior[row, row]],
                ])

                # Evaluation window: ±3.5 marginal standard deviations.
                half_x = 3.5 * np.sqrt(cov_posterior[col, col])
                half_y = 3.5 * np.sqrt(cov_posterior[row, row])
                X, Y = np.meshgrid(
                    np.linspace(mean_2d[0] - half_x, mean_2d[0] + half_x, 100),
                    np.linspace(mean_2d[1] - half_y, mean_2d[1] + half_y, 100),
                )

                rv = stats.multivariate_normal(mean_2d, cov_2d)
                Z = rv.pdf(np.dstack((X, Y)))

                # Log-scaled colour map over the evaluated density values.
                pcm = ax.pcolormesh(
                    X, Y, Z, shading="auto", cmap=colormap,
                    norm=colors.LogNorm(vmin=Z.min(), vmax=Z.max()),
                )
                ax.contour(X, Y, Z, colors="black", linewidths=0.5, alpha=0.6)

                if include_sigma_contours:
                    # The 1-sigma contour of a 2-D Gaussian is where the pdf
                    # drops to exp(-1/2) of its peak (encloses ~39.3% of mass).
                    sigma_level = rv.pdf(mean_2d) * np.exp(-0.5)
                    ax.contour(X, Y, Z, levels=[sigma_level], colors="red",
                               linewidths=1, linestyles="--", alpha=0.8)

                ax.plot(mean_posterior[col], mean_posterior[row], "r+",
                        markersize=10, mew=2, label="Posterior Mean")

                if true_values is not None:
                    ax.plot(true_values[col], true_values[row], "kx",
                            markersize=10, mew=2, label="True Value")

                ax.set_xlabel(labels[col])
                ax.set_ylabel(labels[row])

            else:
                # Upper triangle: unused.
                ax.axis("off")

    # Combine legend entries from a diagonal panel and (if any) a 2-D panel.
    handles, legend_labels = axes[0, 0].get_legend_handles_labels()
    if n_dims > 1:
        extra_handles, extra_labels = axes[1, 0].get_legend_handles_labels()
        handles = handles + extra_handles
        legend_labels = legend_labels + extra_labels

    # Strip the ": value" suffix so the legend shows only the series names.
    fig.legend(
        handles,
        [lbl.split(":")[0] for lbl in legend_labels],
        loc="upper right",
        bbox_to_anchor=(0.9, 0.95),
    )

    # Leave room on the right for the colorbar.
    plt.tight_layout(rect=[0, 0, 0.88, 0.96])

    if n_dims > 1 and pcm is not None:
        cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
        cbar = fig.colorbar(pcm, cax=cbar_ax)
        cbar.set_label("Probability Density", size=12)

    if show_plot:
        plt.show()

    return fig, axes
@@ -158,11 +158,11 @@ class CircleHelper:
158
158
  """
159
159
  return np.fromiter((f(theta) for theta in self.angles()), float)
160
160
 
161
- def to_coefficient(self, u: np.ndarray) -> np.ndarray:
161
+ def to_coefficients(self, u: np.ndarray) -> np.ndarray:
162
162
  """Maps a function vector to its complex Fourier coefficients."""
163
163
  return rfft(u) * self.fft_factor
164
164
 
165
- def from_coefficient(self, coeff: np.ndarray) -> np.ndarray:
165
+ def from_coefficients(self, coeff: np.ndarray) -> np.ndarray:
166
166
  """Maps complex Fourier coefficients to a function vector."""
167
167
  return irfft(coeff, n=2 * self.kmax) * self._inverse_fft_factor
168
168
 
@@ -235,7 +235,7 @@ class CircleHelper:
235
235
  # a minimal, non-redundant representation.
236
236
  return np.concatenate((coeff.real, coeff.imag[1 : self.kmax]))
237
237
 
238
- def _component_to_coefficient(self, c: np.ndarray) -> np.ndarray:
238
+ def _component_to_coefficients(self, c: np.ndarray) -> np.ndarray:
239
239
  """Unpacks a real component vector into complex Fourier coefficients."""
240
240
  # This is the inverse of `_coefficient_to_component`. It reconstructs
241
241
  # the full complex coefficient array that irfft expects. We re-insert
@@ -290,26 +290,26 @@ class Lebesgue(CircleHelper, HilbertModule, AbstractInvariantLebesgueSpace):
290
290
 
291
291
  def to_components(self, u: np.ndarray) -> np.ndarray:
292
292
  """Converts a function vector to its real component representation."""
293
- coeff = self.to_coefficient(u)
293
+ coeff = self.to_coefficients(u)
294
294
  return self._coefficient_to_component(coeff)
295
295
 
296
296
  def from_components(self, c: np.ndarray) -> np.ndarray:
297
297
  """Converts a real component vector back to a function vector."""
298
- coeff = self._component_to_coefficient(c)
299
- return self.from_coefficient(coeff)
298
+ coeff = self._component_to_coefficients(c)
299
+ return self.from_coefficients(coeff)
300
300
 
301
301
  def to_dual(self, u: np.ndarray) -> "LinearForm":
302
302
  """Maps a vector `u` to its dual representation `u*`."""
303
- coeff = self.to_coefficient(u)
303
+ coeff = self.to_coefficients(u)
304
304
  cp = self._coefficient_to_component(self._metric @ coeff)
305
305
  return self.dual.from_components(cp)
306
306
 
307
307
  def from_dual(self, up: "LinearForm") -> np.ndarray:
308
308
  """Maps a dual vector `u*` back to its primal representation `u`."""
309
309
  cp = self.dual.to_components(up)
310
- dual_coeff = self._component_to_coefficient(cp)
310
+ dual_coeff = self._component_to_coefficients(cp)
311
311
  primal_coeff = self._inverse_metric @ dual_coeff
312
- return self.from_coefficient(primal_coeff)
312
+ return self.from_coefficients(primal_coeff)
313
313
 
314
314
  def vector_multiply(self, x1: np.ndarray, x2: np.ndarray) -> np.ndarray:
315
315
  """
@@ -366,9 +366,9 @@ class Lebesgue(CircleHelper, HilbertModule, AbstractInvariantLebesgueSpace):
366
366
  matrix = diags([values], [0])
367
367
 
368
368
  def mapping(u):
369
- coeff = self.to_coefficient(u)
369
+ coeff = self.to_coefficients(u)
370
370
  coeff = matrix @ coeff
371
- return self.from_coefficient(coeff)
371
+ return self.from_coefficients(coeff)
372
372
 
373
373
  return LinearOperator.self_adjoint(self, mapping)
374
374
 
@@ -468,6 +468,31 @@ class Sobolev(
468
468
 
469
469
  return Sobolev(k, order, scale, radius=radius)
470
470
 
471
+ @property
472
+ def derivative_operator(self) -> LinearOperator:
473
+ """
474
+ Returns the derivative operator from the space to one with a lower order.
475
+ """
476
+
477
+ codomain = Sobolev(self.kmax, self.order - 1, self.scale, radius=self.radius)
478
+
479
+ lebesgue_space = self.underlying_space
480
+ k = np.arange(self.kmax + 1)
481
+
482
+ def mapping(u):
483
+ coeff = lebesgue_space.to_coefficients(u)
484
+ diff_coeff = 1j * k * coeff
485
+ return lebesgue_space.from_coefficients(diff_coeff)
486
+
487
+ op_L2 = LinearOperator(
488
+ lebesgue_space,
489
+ lebesgue_space,
490
+ mapping,
491
+ adjoint_mapping=lambda u: -1 * mapping(u),
492
+ )
493
+
494
+ return LinearOperator.from_formal_adjoint(self, codomain, op_L2)
495
+
471
496
  def __eq__(self, other: object) -> bool:
472
497
  """
473
498
  Checks for mathematical equality with another Sobolev space on a circle.
@@ -208,11 +208,11 @@ class SphereHelper:
208
208
 
209
209
  return u
210
210
 
211
- def to_coefficient(self, u: sh.SHGrid) -> sh.SHCoeffs:
211
+ def to_coefficients(self, u: sh.SHGrid) -> sh.SHCoeffs:
212
212
  """Maps a function vector to its spherical harmonic coefficients."""
213
213
  return u.expand(normalization=self.normalization, csphase=self.csphase)
214
214
 
215
- def from_coefficient(self, ulm: sh.SHCoeffs) -> sh.SHGrid:
215
+ def from_coefficients(self, ulm: sh.SHCoeffs) -> sh.SHGrid:
216
216
  """Maps spherical harmonic coefficients to a function vector."""
217
217
  grid = self.grid if self._sampling == 1 else "DH2"
218
218
  return ulm.expand(grid=grid, extend=self.extend)
@@ -393,7 +393,7 @@ class SphereHelper:
393
393
  flat_coeffs = ulm.coeffs.flatten(order="C")
394
394
  return self._sparse_coeffs_to_component @ flat_coeffs
395
395
 
396
- def _component_to_coefficient(self, c: np.ndarray) -> sh.SHCoeffs:
396
+ def _component_to_coefficients(self, c: np.ndarray) -> sh.SHCoeffs:
397
397
  """Maps a component vector to spherical harmonic coefficients."""
398
398
  flat_coeffs = self._sparse_coeffs_to_component.T @ c
399
399
  coeffs = flat_coeffs.reshape((2, self.lmax + 1, self.lmax + 1))
@@ -435,22 +435,22 @@ class Lebesgue(SphereHelper, HilbertModule, AbstractInvariantLebesgueSpace):
435
435
  return self._dim
436
436
 
437
437
  def to_components(self, u: sh.SHGrid) -> np.ndarray:
438
- coeff = self.to_coefficient(u)
438
+ coeff = self.to_coefficients(u)
439
439
  return self._coefficient_to_component(coeff)
440
440
 
441
441
  def from_components(self, c: np.ndarray) -> sh.SHGrid:
442
- coeff = self._component_to_coefficient(c)
443
- return self.from_coefficient(coeff)
442
+ coeff = self._component_to_coefficients(c)
443
+ return self.from_coefficients(coeff)
444
444
 
445
445
  def to_dual(self, u: sh.SHGrid) -> LinearForm:
446
- coeff = self.to_coefficient(u)
446
+ coeff = self.to_coefficients(u)
447
447
  cp = self._coefficient_to_component(coeff) * self.radius**2
448
448
  return self.dual.from_components(cp)
449
449
 
450
450
  def from_dual(self, up: LinearForm) -> sh.SHGrid:
451
451
  cp = self.dual.to_components(up) / self.radius**2
452
- coeff = self._component_to_coefficient(cp)
453
- return self.from_coefficient(coeff)
452
+ coeff = self._component_to_coefficients(cp)
453
+ return self.from_coefficients(coeff)
454
454
 
455
455
  def ax(self, a: float, x: sh.SHGrid) -> None:
456
456
  """
@@ -513,8 +513,8 @@ class Lebesgue(SphereHelper, HilbertModule, AbstractInvariantLebesgueSpace):
513
513
 
514
514
  def mapping(u):
515
515
  c = matrix @ (self.to_components(u))
516
- coeff = self._component_to_coefficient(c)
517
- return self.from_coefficient(coeff)
516
+ coeff = self._component_to_coefficients(c)
517
+ return self.from_coefficients(coeff)
518
518
 
519
519
  return LinearOperator.self_adjoint(self, mapping)
520
520
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pygeoinf
3
- Version: 1.3.0
3
+ Version: 1.3.2
4
4
  Summary: A package for solving geophysical inference and inverse problems
5
5
  License: BSD-3-Clause
6
6
  License-File: LICENSE
@@ -1,12 +1,13 @@
1
1
  pygeoinf/__init__.py,sha256=vAoI6Kw2EL5koHn0EP0kbLvhtWV9gxA439PowiqkQHU,3246
2
+ pygeoinf/auxiliary.py,sha256=lfoTt9ZH4y8SAV8dKZi5EWx1oF_JtxtBMSmlFYqJYfE,1610
2
3
  pygeoinf/backus_gilbert.py,sha256=eFi4blSwOCsg_NuH6WD4gcgjvzvu5g5WpWahGobSBdM,3694
3
- pygeoinf/checks/hilbert_space.py,sha256=Kr7PcOGrNIISezty0FBj5uXavIHC91yjCp2FVGNlHeE,7931
4
- pygeoinf/checks/linear_operators.py,sha256=RkmtAW6e5Zr6EuhX6GAt_pI0IWu2WZ-CrfjSBN_7dsU,4664
5
- pygeoinf/checks/nonlinear_operators.py,sha256=vB12HDX9YHJ8nNSVxG9BWzsVYTg12L5rrYbW98lPYxE,5560
4
+ pygeoinf/checks/hilbert_space.py,sha256=07AZ6fx44PgSPjo_bjRJlVWTta1k1hhIX0TTTwMRdm8,8665
5
+ pygeoinf/checks/linear_operators.py,sha256=LjC7X3RRimsyoLu062RNdhj1KEau3CBhBTZ4m3ZRmjI,7042
6
+ pygeoinf/checks/nonlinear_operators.py,sha256=CoINs_Pm0lzo8nR3H70bo8Osvauiy03CA-b99MnCPjw,7532
6
7
  pygeoinf/direct_sum.py,sha256=7V0qrwFGj0GN-p_zzffefPrIB0dPu5dshLTxem1mQGE,19274
7
8
  pygeoinf/forward_problem.py,sha256=NnqWp7iMfkhHa9d-jBHzYHClaAfhKmO5D058AcJLLYg,10724
8
- pygeoinf/gaussian_measure.py,sha256=FezItV3QFHtVvAc_-Butr2lA4PTWjcZeKYb4I3vN7d4,23816
9
- pygeoinf/hilbert_space.py,sha256=0NCCG-OOHysdXYEFUs1wtJhGgOnuKvjZCZg8NJZO-DA,25331
9
+ pygeoinf/gaussian_measure.py,sha256=bBh64xHgmLFl27krn9hkf8qDQjop_39x69cyhJgUHN8,26219
10
+ pygeoinf/hilbert_space.py,sha256=rKF8upjHw7uw3qBKS-bsEo1J9-So0urTcmVsmJJQeog,25875
10
11
  pygeoinf/inversion.py,sha256=RV0hG2bGnciWdja0oOPKPxnFhYzufqdj-mKYNr4JJ_o,6447
11
12
  pygeoinf/linear_bayesian.py,sha256=L1cJkeHtba4fPXZ8CmiLRBtuG2fmzG228M_iEar-iP8,9643
12
13
  pygeoinf/linear_forms.py,sha256=mgZeDRegNKo8kviE68KrxkHR4gG9bf1RgsJz1MtDMCk,9181
@@ -17,12 +18,13 @@ pygeoinf/nonlinear_forms.py,sha256=t7lk-Bha7Xdk9eiwXMmS0F47oTR6jW6qQ3HkgRGk54A,7
17
18
  pygeoinf/nonlinear_operators.py,sha256=AtkDTQfGDzAnfFDIgiKfdk7uPEI-j_ZA3CNvY5A3U8w,7144
18
19
  pygeoinf/nonlinear_optimisation.py,sha256=skK1ikn9GrVYherD64Qt9WrEYHA2NAJ48msOu_J8Oig,7431
19
20
  pygeoinf/parallel.py,sha256=VVFvNHszy4wSa9LuErIsch4NAkLaZezhdN9YpRROBJo,2267
21
+ pygeoinf/plot.py,sha256=zXY0o5MvgJUiwy3uEwTOwVdKT_5XRBKx_-9qmyalB3A,13654
20
22
  pygeoinf/random_matrix.py,sha256=71l6eAXQ2pRMleaz1lXud6O1F78ugKyp3vHcRBXhdwM,17661
21
23
  pygeoinf/symmetric_space/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
- pygeoinf/symmetric_space/circle.py,sha256=7Bz9BfSkbDnoz5-HFwTsAQE4a09jUapBePwoCK0xYWw,18007
23
- pygeoinf/symmetric_space/sphere.py,sha256=LN5HJsPtj7thIHAElbbtAS9F4t6HKmXEjwMUXQoGLPQ,23365
24
+ pygeoinf/symmetric_space/circle.py,sha256=GuwVmLdHGTMxMrZfyXIPP3pz_y971ntlD5pl42lKJZ0,18796
25
+ pygeoinf/symmetric_space/sphere.py,sha256=SYeGa70fKzasXxwPoVk3tNBtlP0QwLQe5jwW7o3AmcU,23376
24
26
  pygeoinf/symmetric_space/symmetric_space.py,sha256=pEIZZYWsdegrYCwUs3bo86JTz3d2LsXFWdRYFa0syFs,17963
25
- pygeoinf-1.3.0.dist-info/METADATA,sha256=UFX827ttCmk0fTTiXe4XsKx8xRqeaud3AegP6eBtE9k,16365
26
- pygeoinf-1.3.0.dist-info/WHEEL,sha256=M5asmiAlL6HEcOq52Yi5mmk9KmTVjY2RDPtO4p9DMrc,88
27
- pygeoinf-1.3.0.dist-info/licenses/LICENSE,sha256=GrTQnKJemVi69FSbHprq60KN0OJGsOSR-joQoTq-oD8,1501
28
- pygeoinf-1.3.0.dist-info/RECORD,,
27
+ pygeoinf-1.3.2.dist-info/METADATA,sha256=VCh-zC2vcghWe8Ja34MCFu7xj7qZgsZmpBPqCXQhwOU,16365
28
+ pygeoinf-1.3.2.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
29
+ pygeoinf-1.3.2.dist-info/licenses/LICENSE,sha256=GrTQnKJemVi69FSbHprq60KN0OJGsOSR-joQoTq-oD8,1501
30
+ pygeoinf-1.3.2.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.2.0
2
+ Generator: poetry-core 2.2.1
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any