pygeoinf 1.3.0__tar.gz → 1.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/PKG-INFO +1 -1
  2. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/checks/hilbert_space.py +18 -0
  3. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/gaussian_measure.py +64 -8
  4. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/hilbert_space.py +35 -18
  5. pygeoinf-1.3.1/pygeoinf/plot.py +350 -0
  6. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/symmetric_space/circle.py +36 -11
  7. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/symmetric_space/sphere.py +11 -11
  8. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pyproject.toml +1 -1
  9. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/LICENSE +0 -0
  10. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/README.md +0 -0
  11. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/__init__.py +0 -0
  12. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/backus_gilbert.py +0 -0
  13. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/checks/linear_operators.py +0 -0
  14. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/checks/nonlinear_operators.py +0 -0
  15. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/direct_sum.py +0 -0
  16. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/forward_problem.py +0 -0
  17. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/inversion.py +0 -0
  18. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/linear_bayesian.py +0 -0
  19. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/linear_forms.py +0 -0
  20. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/linear_operators.py +0 -0
  21. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/linear_optimisation.py +0 -0
  22. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/linear_solvers.py +0 -0
  23. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/nonlinear_forms.py +0 -0
  24. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/nonlinear_operators.py +0 -0
  25. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/nonlinear_optimisation.py +0 -0
  26. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/parallel.py +0 -0
  27. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/random_matrix.py +0 -0
  28. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/symmetric_space/__init__.py +0 -0
  29. {pygeoinf-1.3.0 → pygeoinf-1.3.1}/pygeoinf/symmetric_space/symmetric_space.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pygeoinf
3
- Version: 1.3.0
3
+ Version: 1.3.1
4
4
  Summary: A package for solving geophysical inference and inverse problems
5
5
  License: BSD-3-Clause
6
6
  License-File: LICENSE
@@ -46,6 +46,23 @@ class HilbertSpaceAxiomChecks:
46
46
  if not norm_sum <= self.norm(x) + self.norm(y):
47
47
  raise AssertionError("Axiom failed: Triangle inequality")
48
48
 
49
+ def _check_riesz_representation(self, x, y):
50
+ """
51
+ Checks that the inner product is consistent with the Riesz map (to_dual).
52
+ This ensures that <x, y> == (R(x))(y).
53
+ """
54
+ # Value from the (potentially optimized) direct inner product method
55
+ direct_inner_product = self.inner_product(x, y)
56
+
57
+ # Value from the Riesz map definition
58
+ dual_x = self.to_dual(x)
59
+ riesz_inner_product = self.duality_product(dual_x, y)
60
+
61
+ if not np.isclose(direct_inner_product, riesz_inner_product):
62
+ raise AssertionError(
63
+ "Axiom failed: Inner product is not consistent with the Riesz map."
64
+ )
65
+
49
66
  def _check_mapping_identities(self, x):
50
67
  """Checks that component and dual mappings are self-consistent."""
51
68
  # from_components(to_components(x)) == x
@@ -176,6 +193,7 @@ class HilbertSpaceAxiomChecks:
176
193
  # Run all checks
177
194
  self._check_vector_space_axioms(x, y, a)
178
195
  self._check_inner_product_axioms(x, y, z, a, b)
196
+ # self._check_riesz_representation(x, y)
179
197
  self._check_mapping_identities(x)
180
198
  self._check_inplace_operations(x, y, a)
181
199
  self._check_copy(x)
@@ -21,6 +21,7 @@ Key Features
21
21
 
22
22
  from __future__ import annotations
23
23
  from typing import Callable, Optional, Any, List, TYPE_CHECKING
24
+ import warnings
24
25
 
25
26
  import numpy as np
26
27
  from scipy.linalg import eigh
@@ -197,6 +198,7 @@ class GaussianMeasure:
197
198
  /,
198
199
  *,
199
200
  expectation: Vector = None,
201
+ rtol: float = 1e-10,
200
202
  ) -> GaussianMeasure:
201
203
  """
202
204
  Creates a Gaussian measure from a dense covariance matrix.
@@ -205,15 +207,36 @@ class GaussianMeasure:
205
207
  the covariance operator. This method computes a Cholesky-like
206
208
  decomposition of the matrix to create a `covariance_factor`.
207
209
 
210
+ It includes a check to handle numerical precision issues, allowing for
211
+ eigenvalues that are slightly negative within a relative tolerance.
212
+
208
213
  Args:
209
214
  domain: The Hilbert space the measure is defined on.
210
215
  covariance_matrix: The dense covariance matrix.
211
216
  expectation: The expectation (mean) of the measure.
217
+ rtol: The relative tolerance used to check for negative eigenvalues.
212
218
  """
213
219
 
214
220
  eigenvalues, U = eigh(covariance_matrix)
215
- if any(val < 0 for val in eigenvalues):
216
- raise ValueError("Covariance matrix is not non-negative")
221
+
222
+ if np.any(eigenvalues < 0):
223
+ max_eig = np.max(np.abs(eigenvalues))
224
+ min_eig = np.min(eigenvalues)
225
+
226
+ # Check if the most negative eigenvalue is outside the tolerance
227
+ if min_eig < -rtol * max_eig:
228
+ raise ValueError(
229
+ "Covariance matrix has significantly negative eigenvalues, "
230
+ "indicating it is not positive semi-definite."
231
+ )
232
+ else:
233
+ # If negative eigenvalues are within tolerance, warn and correct
234
+ warnings.warn(
235
+ "Covariance matrix has small negative eigenvalues due to "
236
+ "numerical error. Clipping them to zero.",
237
+ UserWarning,
238
+ )
239
+ eigenvalues[eigenvalues < 0] = 0
217
240
 
218
241
  values = np.sqrt(eigenvalues)
219
242
  D = diags([values], [0])
@@ -466,24 +489,57 @@ class GaussianMeasure:
466
489
  sample=new_sample if self.sample_set else None,
467
490
  )
468
491
 
469
- def as_multivariate_normal(self) -> multivariate_normal:
492
+ def as_multivariate_normal(
493
+ self, /, *, parallel: bool = False, n_jobs: int = -1
494
+ ) -> multivariate_normal:
470
495
  """
471
496
  Returns the measure as a `scipy.stats.multivariate_normal` object.
472
497
 
473
498
  This is only possible if the measure is defined on a EuclideanSpace.
474
- """
475
499
 
500
+ If the covariance matrix has small negative eigenvalues due to numerical
501
+ precision issues, this method attempts to correct them by setting them
502
+ to zero.
503
+
504
+ Args:
505
+ parallel (bool, optional): If `True`, computes the dense covariance
506
+ matrix in parallel. Defaults to `False`.
507
+ n_jobs (int, optional): The number of parallel jobs to use. `-1`
508
+ uses all available cores. Defaults to -1.
509
+ """
476
510
  if not isinstance(self.domain, EuclideanSpace):
477
511
  raise NotImplementedError(
478
512
  "Method only defined for measures on Euclidean space."
479
513
  )
480
514
 
481
- return multivariate_normal(
482
- mean=self.expectation,
483
- cov=self.covariance.matrix(dense=True),
484
- allow_singular=True,
515
+ mean_vector = self.expectation
516
+
517
+ # Pass the parallelization arguments directly to the matrix creation method
518
+ cov_matrix = self.covariance.matrix(
519
+ dense=True, parallel=parallel, n_jobs=n_jobs
485
520
  )
486
521
 
522
+ try:
523
+ # First, try to create the distribution directly.
524
+ return multivariate_normal(
525
+ mean=mean_vector, cov=cov_matrix, allow_singular=True
526
+ )
527
+ except ValueError:
528
+ # If it fails, clean the covariance matrix and try again.
529
+ warnings.warn(
530
+ "Covariance matrix is not positive semi-definite due to "
531
+ "numerical errors. Setting negative eigenvalues to zero.",
532
+ UserWarning,
533
+ )
534
+
535
+ eigenvalues, eigenvectors = eigh(cov_matrix)
536
+ eigenvalues[eigenvalues < 0] = 0
537
+ cleaned_cov = eigenvectors @ diags(eigenvalues) @ eigenvectors.T
538
+
539
+ return multivariate_normal(
540
+ mean=mean_vector, cov=cleaned_cov, allow_singular=True
541
+ )
542
+
487
543
  def low_rank_approximation(
488
544
  self,
489
545
  size_estimate: int,
@@ -166,6 +166,22 @@ class HilbertSpace(ABC, HilbertSpaceAxiomChecks):
166
166
  """
167
167
  return isinstance(x, type(self.zero))
168
168
 
169
+ def inner_product(self, x1: Vector, x2: Vector) -> float:
170
+ """
171
+ Computes the inner product of two vectors, `(x1, x2)`.
172
+
173
+ This is defined via the duality product as `<R(x1), x2>`, where `R` is
174
+ the Riesz map (`to_dual`).
175
+
176
+ Args:
177
+ x1: The first vector.
178
+ x2: The second vector.
179
+
180
+ Returns:
181
+ The inner product as a float.
182
+ """
183
+ return self.duality_product(self.to_dual(x1), x2)
184
+
169
185
  def duality_product(self, xp: LinearForm, x: Vector) -> float:
170
186
  """
171
187
  Computes the duality product <xp, x>.
@@ -293,23 +309,6 @@ class HilbertSpace(ABC, HilbertSpaceAxiomChecks):
293
309
 
294
310
  return LinearOperator.self_dual(self, self.to_dual)
295
311
 
296
- @final
297
- def inner_product(self, x1: Vector, x2: Vector) -> float:
298
- """
299
- Computes the inner product of two vectors, `(x1, x2)`.
300
-
301
- This is defined via the duality product as `<R(x1), x2>`, where `R` is
302
- the Riesz map (`to_dual`).
303
-
304
- Args:
305
- x1: The first vector.
306
- x2: The second vector.
307
-
308
- Returns:
309
- The inner product as a float.
310
- """
311
- return self.duality_product(self.to_dual(x1), x2)
312
-
313
312
  @final
314
313
  def squared_norm(self, x: Vector) -> float:
315
314
  """
@@ -588,6 +587,15 @@ class EuclideanSpace(HilbertSpace):
588
587
  """Maps a `LinearForm` back to a vector via its components."""
589
588
  return self.dual.to_components(xp)
590
589
 
590
+ def inner_product(self, x1: np.ndarray, x2: np.ndarray) -> float:
591
+ """
592
+ Computes the inner product of two vectors.
593
+
594
+ Notes:
595
+ Default implementation overridden for efficiency.
596
+ """
597
+ return np.dot(x1, x2)
598
+
591
599
  def __eq__(self, other: object):
592
600
  if not isinstance(other, EuclideanSpace):
593
601
  return NotImplemented
@@ -597,7 +605,7 @@ class EuclideanSpace(HilbertSpace):
597
605
  """
598
606
  Checks if an object is a valid element of the space.
599
607
  """
600
- return isinstance(x, np.ndarray) and len(x) == self.dim
608
+ return isinstance(x, np.ndarray) and x.shape == (self.dim,)
601
609
 
602
610
 
603
611
  class MassWeightedHilbertSpace(HilbertSpace):
@@ -678,6 +686,15 @@ class MassWeightedHilbertSpace(HilbertSpace):
678
686
  x = self.underlying_space.from_dual(xp)
679
687
  return self._inverse_mass_operator(x)
680
688
 
689
+ def inner_product(self, x1: Vector, x2: Vector) -> float:
690
+ """
691
+ Computes the inner product of two vectors.
692
+
693
+ Notes:
694
+ Default implementation overridden for efficiency.
695
+ """
696
+ return self._underlying_space.inner_product(self._mass_operator(x1), x2)
697
+
681
698
  def __eq__(self, other: object) -> bool:
682
699
  """
683
700
  Checks for equality with another MassWeightedHilbertSpace.
@@ -0,0 +1,350 @@
1
+ import matplotlib.pyplot as plt
2
+ import matplotlib.colors as colors
3
+ import numpy as np
4
+ import scipy.stats as stats
5
+ from typing import Union, List, Optional
6
+
7
+ def plot_1d_distributions(
8
+ posterior_measures: Union[object, List[object]],
9
+ prior_measures: Optional[Union[object, List[object]]] = None,
10
+ true_value: Optional[float] = None,
11
+ xlabel: str = "Property Value",
12
+ title: str = "Prior and Posterior Probability Distributions",
13
+ figsize: tuple = (12, 7),
14
+ show_plot: bool = True
15
+ ):
16
+ """
17
+ Plot 1D probability distributions for prior and posterior measures using dual y-axes.
18
+
19
+ Args:
20
+ posterior_measures: Single measure or list of measures for posterior distributions
21
+ prior_measures: Single measure or list of measures for prior distributions (optional)
22
+ true_value: True value to mark with a vertical line (optional)
23
+ xlabel: Label for x-axis
24
+ title: Title for the plot
25
+ figsize: Figure size tuple
26
+ show_plot: Whether to display the plot
27
+
28
+ Returns:
29
+ fig, (ax1, ax2): Figure and axes objects
30
+ """
31
+
32
+ # Convert single measures to lists for uniform handling
33
+ if not isinstance(posterior_measures, list):
34
+ posterior_measures = [posterior_measures]
35
+
36
+ if prior_measures is not None and not isinstance(prior_measures, list):
37
+ prior_measures = [prior_measures]
38
+
39
+ # Define color sequences
40
+ prior_colors = ['green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan']
41
+ posterior_colors = ['blue', 'red', 'darkgreen', 'orange', 'purple', 'brown', 'pink', 'gray']
42
+
43
+ # Calculate statistics for all distributions
44
+ posterior_stats = []
45
+ for measure in posterior_measures:
46
+ if hasattr(measure, 'expectation') and hasattr(measure, 'covariance'):
47
+ # For pygeoinf measures
48
+ mean = measure.expectation[0]
49
+ var = measure.covariance.matrix(dense=True)[0, 0]
50
+ std = np.sqrt(var)
51
+ else:
52
+ # For scipy distributions
53
+ mean = measure.mean[0]
54
+ std = np.sqrt(measure.cov[0, 0])
55
+ posterior_stats.append((mean, std))
56
+
57
+ prior_stats = []
58
+ if prior_measures is not None:
59
+ for measure in prior_measures:
60
+ if hasattr(measure, 'expectation') and hasattr(measure, 'covariance'):
61
+ # For pygeoinf measures
62
+ mean = measure.expectation[0]
63
+ var = measure.covariance.matrix(dense=True)[0, 0]
64
+ std = np.sqrt(var)
65
+ else:
66
+ # For scipy distributions
67
+ mean = measure.mean[0]
68
+ std = np.sqrt(measure.cov[0, 0])
69
+ prior_stats.append((mean, std))
70
+
71
+ # Determine plot range to include all distributions
72
+ all_means = [stat[0] for stat in posterior_stats]
73
+ all_stds = [stat[1] for stat in posterior_stats]
74
+
75
+ if prior_measures is not None:
76
+ all_means.extend([stat[0] for stat in prior_stats])
77
+ all_stds.extend([stat[1] for stat in prior_stats])
78
+
79
+ if true_value is not None:
80
+ all_means.append(true_value)
81
+ all_stds.append(0) # No std for true value
82
+
83
+ # Calculate x-axis range (6 sigma coverage)
84
+ x_min = min([mean - 6 * std for mean, std in zip(all_means, all_stds) if std > 0])
85
+ x_max = max([mean + 6 * std for mean, std in zip(all_means, all_stds) if std > 0])
86
+
87
+ # Add some padding around true value if needed
88
+ if true_value is not None:
89
+ range_size = x_max - x_min
90
+ x_min = min(x_min, true_value - 0.1 * range_size)
91
+ x_max = max(x_max, true_value + 0.1 * range_size)
92
+
93
+ x_axis = np.linspace(x_min, x_max, 1000)
94
+
95
+ # Create the plot with two y-axes
96
+ fig, ax1 = plt.subplots(figsize=figsize)
97
+
98
+ # Plot priors on the first axis (left y-axis) if provided
99
+ if prior_measures is not None:
100
+ color1 = prior_colors[0] if len(prior_measures) > 0 else 'green'
101
+ ax1.set_xlabel(xlabel)
102
+ ax1.set_ylabel('Prior Probability Density', color=color1)
103
+
104
+ for i, (measure, (mean, std)) in enumerate(zip(prior_measures, prior_stats)):
105
+ color = prior_colors[i % len(prior_colors)]
106
+
107
+ # Calculate PDF values using scipy.stats
108
+ pdf_values = stats.norm.pdf(x_axis, loc=mean, scale=std)
109
+
110
+ # Determine label
111
+ if len(prior_measures) == 1:
112
+ label = f'Prior PDF (Mean: {mean:.5f})'
113
+ else:
114
+ label = f'Prior {i+1} (Mean: {mean:.5f})'
115
+
116
+ ax1.plot(x_axis, pdf_values, color=color, lw=2, linestyle=':', label=label)
117
+ ax1.fill_between(x_axis, pdf_values, color=color, alpha=0.15)
118
+
119
+ ax1.tick_params(axis='y', labelcolor=color1)
120
+ ax1.grid(True, linestyle='--')
121
+ else:
122
+ # If no priors, use the left axis for posteriors
123
+ ax1.set_xlabel(xlabel)
124
+ ax1.set_ylabel('Probability Density')
125
+ ax1.grid(True, linestyle='--')
126
+
127
+ # Create second y-axis for posteriors (or use first if no priors)
128
+ if prior_measures is not None:
129
+ ax2 = ax1.twinx()
130
+ color2 = posterior_colors[0] if len(posterior_measures) > 0 else 'blue'
131
+ ax2.set_ylabel('Posterior Probability Density', color=color2)
132
+ ax2.tick_params(axis='y', labelcolor=color2)
133
+ ax2.grid(False)
134
+ plot_ax = ax2
135
+ else:
136
+ plot_ax = ax1
137
+ color2 = posterior_colors[0] if len(posterior_measures) > 0 else 'blue'
138
+
139
+ # Plot posteriors
140
+ for i, (measure, (mean, std)) in enumerate(zip(posterior_measures, posterior_stats)):
141
+ color = posterior_colors[i % len(posterior_colors)]
142
+
143
+ # Calculate PDF values using scipy.stats
144
+ pdf_values = stats.norm.pdf(x_axis, loc=mean, scale=std)
145
+
146
+ # Determine label
147
+ if len(posterior_measures) == 1:
148
+ label = f'Posterior PDF (Mean: {mean:.5f})'
149
+ else:
150
+ label = f'Posterior {i+1} (Mean: {mean:.5f})'
151
+
152
+ plot_ax.plot(x_axis, pdf_values, color=color, lw=2, label=label)
153
+ plot_ax.fill_between(x_axis, pdf_values, color=color, alpha=0.2)
154
+
155
+ # Plot true value if provided
156
+ if true_value is not None:
157
+ ax1.axvline(true_value, color='black', linestyle='-', lw=2,
158
+ label=f'True Value: {true_value:.5f}')
159
+
160
+ # Create combined legend
161
+ handles1, labels1 = ax1.get_legend_handles_labels()
162
+
163
+ if prior_measures is not None:
164
+ handles2, labels2 = ax2.get_legend_handles_labels()
165
+ all_handles = handles1 + handles2
166
+ all_labels = [h.get_label() for h in all_handles]
167
+ else:
168
+ all_handles = handles1
169
+ all_labels = [h.get_label() for h in all_handles]
170
+
171
+ fig.legend(all_handles, all_labels, loc='upper right', bbox_to_anchor=(0.9, 0.9))
172
+ fig.suptitle(title, fontsize=16)
173
+ fig.tight_layout(rect=[0, 0, 1, 0.96])
174
+
175
+ if show_plot:
176
+ plt.show()
177
+
178
+ if prior_measures is not None:
179
+ return fig, (ax1, ax2)
180
+ else:
181
+ return fig, ax1
182
+
183
+
184
+ def plot_corner_distributions(
185
+ posterior_measure: object,
186
+ true_values: Optional[Union[List[float], np.ndarray]] = None,
187
+ labels: Optional[List[str]] = None,
188
+ title: str = "Joint Posterior Distribution",
189
+ figsize: Optional[tuple] = None,
190
+ show_plot: bool = True,
191
+ include_sigma_contours: bool = True,
192
+ colormap: str = "Blues"
193
+ ):
194
+ """
195
+ Create a corner plot for multi-dimensional posterior distributions.
196
+
197
+ Args:
198
+ posterior_measure: Multi-dimensional posterior measure (pygeoinf object)
199
+ true_values: True values for each dimension (optional)
200
+ labels: Labels for each dimension (optional)
201
+ title: Title for the plot
202
+ figsize: Figure size tuple (if None, calculated based on dimensions)
203
+ show_plot: Whether to display the plot
204
+ include_sigma_contours: Whether to include 1-sigma contour lines
205
+ colormap: Colormap for 2D plots
206
+
207
+ Returns:
208
+ fig, axes: Figure and axes array
209
+ """
210
+
211
+ # Extract statistics from the measure
212
+ if hasattr(posterior_measure, 'expectation') and hasattr(posterior_measure, 'covariance'):
213
+ mean_posterior = posterior_measure.expectation
214
+ cov_posterior = posterior_measure.covariance.matrix(dense=True, parallel=True)
215
+ else:
216
+ raise ValueError("posterior_measure must have 'expectation' and 'covariance' attributes")
217
+
218
+ n_dims = len(mean_posterior)
219
+
220
+ # Set default labels if not provided
221
+ if labels is None:
222
+ labels = [f"Dimension {i+1}" for i in range(n_dims)]
223
+
224
+ # Set figure size based on dimensions if not provided
225
+ if figsize is None:
226
+ figsize = (3 * n_dims, 3 * n_dims)
227
+
228
+ # Create subplots
229
+ fig, axes = plt.subplots(n_dims, n_dims, figsize=figsize)
230
+ fig.suptitle(title, fontsize=16)
231
+
232
+ # Ensure axes is always 2D array
233
+ if n_dims == 1:
234
+ axes = np.array([[axes]])
235
+ elif n_dims == 2:
236
+ axes = axes.reshape(2, 2)
237
+
238
+ # Initialize pcm variable for colorbar
239
+ pcm = None
240
+
241
+ for i in range(n_dims):
242
+ for j in range(n_dims):
243
+ ax = axes[i, j]
244
+
245
+ if i == j: # Diagonal plots (1D marginal distributions)
246
+ mu = mean_posterior[i]
247
+ sigma = np.sqrt(cov_posterior[i, i])
248
+
249
+ # Create x-axis range
250
+ x = np.linspace(mu - 4 * sigma, mu + 4 * sigma, 200)
251
+ pdf = stats.norm.pdf(x, mu, sigma)
252
+
253
+ # Plot the PDF
254
+ ax.plot(x, pdf, "darkblue", label="Posterior PDF")
255
+ ax.fill_between(x, pdf, color="lightblue", alpha=0.6)
256
+
257
+ # Add true value if provided
258
+ if true_values is not None:
259
+ true_val = true_values[i]
260
+ ax.axvline(true_val, color="black", linestyle="-",
261
+ label=f"True: {true_val:.2f}")
262
+
263
+ ax.set_xlabel(labels[i])
264
+ ax.set_ylabel("Density" if i == 0 else "")
265
+ ax.set_yticklabels([])
266
+
267
+ elif i > j: # Lower triangle: 2D joint distributions
268
+ # Extract 2D mean and covariance
269
+ mean_2d = np.array([mean_posterior[j], mean_posterior[i]])
270
+ cov_2d = np.array([
271
+ [cov_posterior[j, j], cov_posterior[j, i]],
272
+ [cov_posterior[i, j], cov_posterior[i, i]]
273
+ ])
274
+
275
+ # Create 2D grid
276
+ sigma_j = np.sqrt(cov_posterior[j, j])
277
+ sigma_i = np.sqrt(cov_posterior[i, i])
278
+
279
+ x_range = np.linspace(mean_2d[0] - 3.5 * sigma_j,
280
+ mean_2d[0] + 3.5 * sigma_j, 100)
281
+ y_range = np.linspace(mean_2d[1] - 3.5 * sigma_i,
282
+ mean_2d[1] + 3.5 * sigma_i, 100)
283
+
284
+ X, Y = np.meshgrid(x_range, y_range)
285
+ pos = np.dstack((X, Y))
286
+
287
+ # Calculate PDF values
288
+ rv = stats.multivariate_normal(mean_2d, cov_2d)
289
+ Z = rv.pdf(pos)
290
+
291
+ # Create filled contour plot using pcolormesh like the original
292
+ pcm = ax.pcolormesh(
293
+ X, Y, Z, shading="auto", cmap=colormap,
294
+ norm=colors.LogNorm(vmin=Z.min(), vmax=Z.max())
295
+ )
296
+
297
+ # Add contour lines
298
+ ax.contour(X, Y, Z, colors="black", linewidths=0.5, alpha=0.6)
299
+
300
+ # Add 1-sigma contour if requested
301
+ if include_sigma_contours:
302
+ # Calculate 1-sigma level (approximately 39% of peak for 2D Gaussian)
303
+ sigma_level = rv.pdf(mean_2d) * np.exp(-0.5)
304
+ ax.contour(X, Y, Z, levels=[sigma_level], colors="red",
305
+ linewidths=1, linestyles="--", alpha=0.8)
306
+
307
+ # Plot mean point
308
+ ax.plot(mean_posterior[j], mean_posterior[i], "r+",
309
+ markersize=10, mew=2, label="Posterior Mean")
310
+
311
+ # Plot true value if provided
312
+ if true_values is not None:
313
+ ax.plot(true_values[j], true_values[i], "kx",
314
+ markersize=10, mew=2, label="True Value")
315
+
316
+ ax.set_xlabel(labels[j])
317
+ ax.set_ylabel(labels[i])
318
+
319
+ else: # Upper triangle: hide these plots
320
+ ax.axis("off")
321
+
322
+ # Create legend similar to the original
323
+ handles, labels_leg = axes[0, 0].get_legend_handles_labels()
324
+ if n_dims > 1:
325
+ handles2, labels2 = axes[1, 0].get_legend_handles_labels()
326
+ handles.extend(handles2)
327
+ labels_leg.extend(labels2)
328
+
329
+ # Clean up labels by removing values after colons
330
+ cleaned_labels = [label.split(":")[0] for label in labels_leg]
331
+
332
+ fig.legend(
333
+ handles, cleaned_labels,
334
+ loc="upper right",
335
+ bbox_to_anchor=(0.9, 0.95)
336
+ )
337
+
338
+ # Adjust main plot layout to make room on the right for the colorbar
339
+ plt.tight_layout(rect=[0, 0, 0.88, 0.96])
340
+
341
+ # Add a colorbar if we have 2D plots
342
+ if n_dims > 1 and pcm is not None:
343
+ cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
344
+ cbar = fig.colorbar(pcm, cax=cbar_ax)
345
+ cbar.set_label("Probability Density", size=12)
346
+
347
+ if show_plot:
348
+ plt.show()
349
+
350
+ return fig, axes
@@ -158,11 +158,11 @@ class CircleHelper:
158
158
  """
159
159
  return np.fromiter((f(theta) for theta in self.angles()), float)
160
160
 
161
- def to_coefficient(self, u: np.ndarray) -> np.ndarray:
161
+ def to_coefficients(self, u: np.ndarray) -> np.ndarray:
162
162
  """Maps a function vector to its complex Fourier coefficients."""
163
163
  return rfft(u) * self.fft_factor
164
164
 
165
- def from_coefficient(self, coeff: np.ndarray) -> np.ndarray:
165
+ def from_coefficients(self, coeff: np.ndarray) -> np.ndarray:
166
166
  """Maps complex Fourier coefficients to a function vector."""
167
167
  return irfft(coeff, n=2 * self.kmax) * self._inverse_fft_factor
168
168
 
@@ -235,7 +235,7 @@ class CircleHelper:
235
235
  # a minimal, non-redundant representation.
236
236
  return np.concatenate((coeff.real, coeff.imag[1 : self.kmax]))
237
237
 
238
- def _component_to_coefficient(self, c: np.ndarray) -> np.ndarray:
238
+ def _component_to_coefficients(self, c: np.ndarray) -> np.ndarray:
239
239
  """Unpacks a real component vector into complex Fourier coefficients."""
240
240
  # This is the inverse of `_coefficient_to_component`. It reconstructs
241
241
  # the full complex coefficient array that irfft expects. We re-insert
@@ -290,26 +290,26 @@ class Lebesgue(CircleHelper, HilbertModule, AbstractInvariantLebesgueSpace):
290
290
 
291
291
  def to_components(self, u: np.ndarray) -> np.ndarray:
292
292
  """Converts a function vector to its real component representation."""
293
- coeff = self.to_coefficient(u)
293
+ coeff = self.to_coefficients(u)
294
294
  return self._coefficient_to_component(coeff)
295
295
 
296
296
  def from_components(self, c: np.ndarray) -> np.ndarray:
297
297
  """Converts a real component vector back to a function vector."""
298
- coeff = self._component_to_coefficient(c)
299
- return self.from_coefficient(coeff)
298
+ coeff = self._component_to_coefficients(c)
299
+ return self.from_coefficients(coeff)
300
300
 
301
301
  def to_dual(self, u: np.ndarray) -> "LinearForm":
302
302
  """Maps a vector `u` to its dual representation `u*`."""
303
- coeff = self.to_coefficient(u)
303
+ coeff = self.to_coefficients(u)
304
304
  cp = self._coefficient_to_component(self._metric @ coeff)
305
305
  return self.dual.from_components(cp)
306
306
 
307
307
  def from_dual(self, up: "LinearForm") -> np.ndarray:
308
308
  """Maps a dual vector `u*` back to its primal representation `u`."""
309
309
  cp = self.dual.to_components(up)
310
- dual_coeff = self._component_to_coefficient(cp)
310
+ dual_coeff = self._component_to_coefficients(cp)
311
311
  primal_coeff = self._inverse_metric @ dual_coeff
312
- return self.from_coefficient(primal_coeff)
312
+ return self.from_coefficients(primal_coeff)
313
313
 
314
314
  def vector_multiply(self, x1: np.ndarray, x2: np.ndarray) -> np.ndarray:
315
315
  """
@@ -366,9 +366,9 @@ class Lebesgue(CircleHelper, HilbertModule, AbstractInvariantLebesgueSpace):
366
366
  matrix = diags([values], [0])
367
367
 
368
368
  def mapping(u):
369
- coeff = self.to_coefficient(u)
369
+ coeff = self.to_coefficients(u)
370
370
  coeff = matrix @ coeff
371
- return self.from_coefficient(coeff)
371
+ return self.from_coefficients(coeff)
372
372
 
373
373
  return LinearOperator.self_adjoint(self, mapping)
374
374
 
@@ -468,6 +468,31 @@ class Sobolev(
468
468
 
469
469
  return Sobolev(k, order, scale, radius=radius)
470
470
 
471
+ @property
472
+ def derivative_operator(self) -> LinearOperator:
473
+ """
474
+ Returns the derivative operator from the space to one with a lower order.
475
+ """
476
+
477
+ codomain = Sobolev(self.kmax, self.order - 1, self.scale, radius=self.radius)
478
+
479
+ lebesgue_space = self.underlying_space
480
+ k = np.arange(self.kmax + 1)
481
+
482
+ def mapping(u):
483
+ coeff = lebesgue_space.to_coefficients(u)
484
+ diff_coeff = 1j * k * coeff
485
+ return lebesgue_space.from_coefficients(diff_coeff)
486
+
487
+ op_L2 = LinearOperator(
488
+ lebesgue_space,
489
+ lebesgue_space,
490
+ mapping,
491
+ adjoint_mapping=lambda u: -1 * mapping(u),
492
+ )
493
+
494
+ return LinearOperator.from_formal_adjoint(self, codomain, op_L2)
495
+
471
496
  def __eq__(self, other: object) -> bool:
472
497
  """
473
498
  Checks for mathematical equality with another Sobolev space on a circle.
@@ -208,11 +208,11 @@ class SphereHelper:
208
208
 
209
209
  return u
210
210
 
211
- def to_coefficient(self, u: sh.SHGrid) -> sh.SHCoeffs:
211
+ def to_coefficients(self, u: sh.SHGrid) -> sh.SHCoeffs:
212
212
  """Maps a function vector to its spherical harmonic coefficients."""
213
213
  return u.expand(normalization=self.normalization, csphase=self.csphase)
214
214
 
215
- def from_coefficient(self, ulm: sh.SHCoeffs) -> sh.SHGrid:
215
+ def from_coefficients(self, ulm: sh.SHCoeffs) -> sh.SHGrid:
216
216
  """Maps spherical harmonic coefficients to a function vector."""
217
217
  grid = self.grid if self._sampling == 1 else "DH2"
218
218
  return ulm.expand(grid=grid, extend=self.extend)
@@ -393,7 +393,7 @@ class SphereHelper:
393
393
  flat_coeffs = ulm.coeffs.flatten(order="C")
394
394
  return self._sparse_coeffs_to_component @ flat_coeffs
395
395
 
396
- def _component_to_coefficient(self, c: np.ndarray) -> sh.SHCoeffs:
396
+ def _component_to_coefficients(self, c: np.ndarray) -> sh.SHCoeffs:
397
397
  """Maps a component vector to spherical harmonic coefficients."""
398
398
  flat_coeffs = self._sparse_coeffs_to_component.T @ c
399
399
  coeffs = flat_coeffs.reshape((2, self.lmax + 1, self.lmax + 1))
@@ -435,22 +435,22 @@ class Lebesgue(SphereHelper, HilbertModule, AbstractInvariantLebesgueSpace):
435
435
  return self._dim
436
436
 
437
437
  def to_components(self, u: sh.SHGrid) -> np.ndarray:
438
- coeff = self.to_coefficient(u)
438
+ coeff = self.to_coefficients(u)
439
439
  return self._coefficient_to_component(coeff)
440
440
 
441
441
  def from_components(self, c: np.ndarray) -> sh.SHGrid:
442
- coeff = self._component_to_coefficient(c)
443
- return self.from_coefficient(coeff)
442
+ coeff = self._component_to_coefficients(c)
443
+ return self.from_coefficients(coeff)
444
444
 
445
445
  def to_dual(self, u: sh.SHGrid) -> LinearForm:
446
- coeff = self.to_coefficient(u)
446
+ coeff = self.to_coefficients(u)
447
447
  cp = self._coefficient_to_component(coeff) * self.radius**2
448
448
  return self.dual.from_components(cp)
449
449
 
450
450
  def from_dual(self, up: LinearForm) -> sh.SHGrid:
451
451
  cp = self.dual.to_components(up) / self.radius**2
452
- coeff = self._component_to_coefficient(cp)
453
- return self.from_coefficient(coeff)
452
+ coeff = self._component_to_coefficients(cp)
453
+ return self.from_coefficients(coeff)
454
454
 
455
455
  def ax(self, a: float, x: sh.SHGrid) -> None:
456
456
  """
@@ -513,8 +513,8 @@ class Lebesgue(SphereHelper, HilbertModule, AbstractInvariantLebesgueSpace):
513
513
 
514
514
  def mapping(u):
515
515
  c = matrix @ (self.to_components(u))
516
- coeff = self._component_to_coefficient(c)
517
- return self.from_coefficient(coeff)
516
+ coeff = self._component_to_coefficients(c)
517
+ return self.from_coefficients(coeff)
518
518
 
519
519
  return LinearOperator.self_adjoint(self, mapping)
520
520
 
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "pygeoinf"
3
- version = "1.3.0"
3
+ version = "1.3.1"
4
4
  description = "A package for solving geophysical inference and inverse problems"
5
5
  authors = ["David Al-Attar and Dan Heathcote"]
6
6
  readme = "README.md"
File without changes
File without changes
File without changes
File without changes
File without changes