pygeoinf 1.3.9.tar.gz → 1.4.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/PKG-INFO +2 -1
  2. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/gaussian_measure.py +39 -3
  3. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/hilbert_space.py +15 -0
  4. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/linear_operators.py +32 -0
  5. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/symmetric_space/circle.py +41 -1
  6. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/symmetric_space/sphere.py +230 -21
  7. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/symmetric_space/symmetric_space.py +167 -12
  8. pygeoinf-1.4.0/pygeoinf/symmetric_space/wigner.py +284 -0
  9. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pyproject.toml +4 -1
  10. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/LICENSE +0 -0
  11. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/README.md +0 -0
  12. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/__init__.py +0 -0
  13. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/auxiliary.py +0 -0
  14. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/backus_gilbert.py +0 -0
  15. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/checks/__init__.py +0 -0
  16. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/checks/hilbert_space.py +0 -0
  17. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/checks/linear_operators.py +0 -0
  18. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/checks/nonlinear_operators.py +0 -0
  19. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/direct_sum.py +0 -0
  20. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/forward_problem.py +0 -0
  21. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/inversion.py +0 -0
  22. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/linear_bayesian.py +0 -0
  23. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/linear_forms.py +0 -0
  24. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/linear_optimisation.py +0 -0
  25. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/linear_solvers.py +0 -0
  26. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/nonlinear_forms.py +0 -0
  27. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/nonlinear_operators.py +0 -0
  28. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/nonlinear_optimisation.py +0 -0
  29. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/parallel.py +0 -0
  30. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/plot.py +0 -0
  31. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/preconditioners.py +0 -0
  32. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/random_matrix.py +0 -0
  33. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/subsets.py +0 -0
  34. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/subspaces.py +0 -0
  35. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/symmetric_space/__init__.py +0 -0
  36. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/symmetric_space/sh_tools.py +0 -0
  37. {pygeoinf-1.3.9 → pygeoinf-1.4.0}/pygeoinf/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pygeoinf
3
- Version: 1.3.9
3
+ Version: 1.4.0
4
4
  Summary: A package for solving geophysical inference and inverse problems
5
5
  License: BSD-3-Clause
6
6
  License-File: LICENSE
@@ -16,6 +16,7 @@ Provides-Extra: sphere
16
16
  Requires-Dist: Cartopy (>=0.23.0,<0.24.0) ; extra == "sphere"
17
17
  Requires-Dist: joblib (>=1.5.2,<2.0.0)
18
18
  Requires-Dist: matplotlib (>=3.0.0)
19
+ Requires-Dist: numba (>=0.63.1,<0.64.0)
19
20
  Requires-Dist: numpy (>=1.26.0)
20
21
  Requires-Dist: pyqt6 (>=6.0.0)
21
22
  Requires-Dist: pyshtools (>=4.0.0) ; extra == "sphere"
@@ -455,10 +455,10 @@ class GaussianMeasure:
455
455
  if n < 1:
456
456
  raise ValueError("Number of samples must be a positive integer.")
457
457
 
458
- # Step 1: Draw samples (Parallelized)
458
+ # Draw samples
459
459
  samples = self.samples(n, parallel=parallel, n_jobs=n_jobs)
460
460
 
461
- # Step 2: Compute variance using vector arithmetic
461
+ # Compute variance using vector arithmetic
462
462
  expectation = self.expectation
463
463
  variance = self.domain.zero
464
464
 
@@ -469,6 +469,42 @@ class GaussianMeasure:
469
469
 
470
470
  return variance
471
471
 
472
+ def sample_pointwise_std(
473
+ self, n: int, /, *, parallel: bool = False, n_jobs: int = -1
474
+ ) -> Vector:
475
+ """
476
+ Estimates the pointwise standard deviation by drawing n samples.
477
+
478
+ Args:
479
+ n: Number of samples to draw.
480
+ parallel: If True, draws samples in parallel.
481
+ n_jobs: Number of CPU cores to use. -1 means all available.
482
+ """
483
+ variance = self.sample_pointwise_variance(n, parallel=parallel, n_jobs=n_jobs)
484
+ return self.domain.vector_sqrt(variance)
485
+
486
+ def with_dense_covariance(self, parallel: bool = False, n_jobs: int = -1):
487
+ """
488
+ Forms a new Gaussian measure equivalent to the existing one, but
489
+ with its covariance matrix stored in dense form. The dense matrix
490
+ calculation can optionally be parallelised.
491
+
492
+ Args:
493
+ parallel: If True, computes the covariance in parallel.
494
+ n_jobs: Number of CPU cores to use. -1 means all available.
495
+
496
+ Returns:
497
+ The new Gaussian measure.
498
+ """
499
+
500
+ covariance_matrix = self.covariance.matrix(
501
+ dense=True, galerkin=True, parallel=parallel, n_jobs=n_jobs
502
+ )
503
+
504
+ return GaussianMeasure.from_covariance_matrix(
505
+ self.domain, covariance_matrix, expectation=self.expectation
506
+ )
507
+
472
508
  def affine_mapping(
473
509
  self, /, *, operator: LinearOperator = None, translation: Vector = None
474
510
  ) -> GaussianMeasure:
@@ -546,7 +582,7 @@ class GaussianMeasure:
546
582
 
547
583
  # Pass the parallelization arguments directly to the matrix creation method
548
584
  cov_matrix = self.covariance.matrix(
549
- dense=True, parallel=parallel, n_jobs=n_jobs
585
+ dense=True, galerkin=True, parallel=parallel, n_jobs=n_jobs
550
586
  )
551
587
 
552
588
  try:
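
For orientation, a minimal usage sketch of the new GaussianMeasure helpers (illustrative only; `mu` stands for any existing measure whose domain is a HilbertModule, so that vector_sqrt is available):

    # Pointwise standard deviation estimated from 100 samples,
    # drawn in parallel across all available cores.
    std = mu.sample_pointwise_std(100, parallel=True, n_jobs=-1)

    # Equivalent measure whose covariance is held as a dense Galerkin
    # matrix, formed once and reused in subsequent computations.
    mu_dense = mu.with_dense_covariance(parallel=True)
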
@@ -547,6 +547,12 @@ class HilbertModule(HilbertSpace, ABC):
547
547
  The product of the two vectors.
548
548
  """
549
549
 
550
+ @abstractmethod
551
+ def vector_sqrt(self, x: Vector) -> Vector:
552
+ """
553
+ Returns the square root of a vector.
554
+ """
555
+
550
556
 
551
557
  class EuclideanSpace(HilbertSpace):
552
558
  """
@@ -829,3 +835,12 @@ class MassWeightedHilbertModule(MassWeightedHilbertSpace, HilbertModule):
829
835
  is itself an instance of `HilbertModule`.
830
836
  """
831
837
  return self.underlying_space.vector_multiply(x1, x2)
838
+
839
+ def vector_sqrt(self, x: Vector) -> Vector:
840
+ """
841
+ Computes the vector square root by delegating to the underlying space.
842
+
843
+ Note: This assumes the underlying space provided during initialization
844
+ is itself an instance of `HilbertModule`.
845
+ """
846
+ return self.underlying_space.vector_sqrt(x)
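
The natural contract here is that, for a non-negative vector x, vector_multiply(vector_sqrt(x), vector_sqrt(x)) reproduces x. A schematic implementation for a space whose vectors are plain NumPy arrays (hypothetical subclass, shown only to illustrate the new method; the other abstract methods of HilbertModule are omitted):

    import numpy as np

    class ArrayModule(HilbertModule):   # hypothetical, for illustration only
        def vector_multiply(self, x1, x2):
            return x1 * x2              # pointwise product

        def vector_sqrt(self, x):
            return np.sqrt(x)           # pointwise square root
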
@@ -102,10 +102,13 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
102
102
  self.__adjoint_mapping: Callable[[Any], Any]
103
103
  self.__dual_mapping: Callable[[Any], Any]
104
104
 
105
+ self.__using_default_dual_and_adjoint = False
106
+
105
107
  if dual_mapping is None:
106
108
  if adjoint_mapping is None:
107
109
  self.__dual_mapping = self._dual_mapping_default
108
110
  self.__adjoint_mapping = self._adjoint_mapping_from_dual
111
+ self.__using_default_dual_and_adjoint = True
109
112
  else:
110
113
  self.__adjoint_mapping = adjoint_mapping
111
114
  self.__dual_mapping = self._dual_mapping_from_adjoint
@@ -919,6 +922,35 @@ class LinearOperator(NonLinearOperator, LinearOperatorAxiomChecks):
919
922
  self, galerkin: bool, parallel: bool, n_jobs: int
920
923
  ) -> np.ndarray:
921
924
 
925
+ # Optimization: If the codomain is smaller than the domain, it is cheaper
926
+ # to compute the matrix of the adjoint/dual (which has fewer columns)
927
+ # and transpose the result.
928
+
929
+ # Note: This recursion naturally terminates because the adjoint/dual
930
+ # swaps the domain and codomain. In the recursive call,
931
+ # (codomain.dim < domain.dim) will be False, forcing the standard path.
932
+
933
+ # If the operator's dual and adjoint actions use the default
934
+ # implementations, this optimisation is skipped.
935
+ if (
936
+ self.codomain.dim < self.domain.dim
937
+ and not self.__using_default_dual_and_adjoint
938
+ ):
939
+ if galerkin:
940
+ # For Galerkin representations: Matrix(L) = Matrix(L*).T
941
+ return self.adjoint.matrix(
942
+ dense=True, galerkin=True, parallel=parallel, n_jobs=n_jobs
943
+ ).T
944
+ else:
945
+ # For Standard representations: Matrix(L) = Matrix(L').T
946
+ return self.dual.matrix(
947
+ dense=True, galerkin=False, parallel=parallel, n_jobs=n_jobs
948
+ ).T
949
+
950
+ # --- Standard Column-wise Construction ---
951
+ # This block executes if optimization is not applicable (or in the
952
+ # recursive base case).
953
+
922
954
  scipy_op_wrapper = self.matrix(galerkin=galerkin)
923
955
 
924
956
  if not parallel:
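
The saving comes from the identity that the (Galerkin) matrix of L is the transpose of the matrix of its adjoint, so when the codomain is much smaller the dense matrix can be assembled from far fewer operator applications. A standalone NumPy sketch of that cost argument (the helper below is purely illustrative, not the package's column-wise construction):

    import numpy as np

    def dense_by_columns(apply, in_dim, out_dim):
        # Assemble a dense matrix by applying the operator to each basis vector.
        mat = np.zeros((out_dim, in_dim))
        for j in range(in_dim):
            e = np.zeros(in_dim)
            e[j] = 1.0
            mat[:, j] = apply(e)
        return mat

    A = np.random.randn(3, 500)   # codomain dim 3, domain dim 500
    M_direct = dense_by_columns(lambda v: A @ v, 500, 3)             # 500 applications
    M_via_adjoint = dense_by_columns(lambda v: A.T @ v, 3, 500).T    # only 3 applications
    assert np.allclose(M_direct, M_via_adjoint)
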
@@ -24,7 +24,7 @@ Key Classes
24
24
 
25
25
  from __future__ import annotations
26
26
 
27
- from typing import Callable, Tuple, Optional, Any
27
+ from typing import Callable, Tuple, Optional, Any, List
28
28
  import matplotlib.pyplot as plt
29
29
  import numpy as np
30
30
  from scipy.fft import rfft, irfft
@@ -226,6 +226,40 @@ class CircleHelper:
226
226
  ax.fill_between(self.angles(), u - u_bound, u + u_bound, **kwargs)
227
227
  return fig, ax
228
228
 
229
+ def geodesic_quadrature(
230
+ self, p1: float, p2: float, n_points: int
231
+ ) -> Tuple[List[float], np.ndarray]:
232
+ """
233
+ Returns quadrature points and weights for the shortest arc between p1 and p2.
234
+
235
+ Args:
236
+ p1: Starting angle in radians.
237
+ p2: Ending angle in radians.
238
+ n_points: Number of quadrature points.
239
+
240
+ Returns:
241
+ points: A list of angles (floats) along the shortest arc.
242
+ weights: Integration weights scaled by the arc length.
243
+ """
244
+ # Calculate the shortest signed angular distance on the circle
245
+ # This ensures we take the "inner" arc rather than the long way around.
246
+ diff = (p2 - p1 + np.pi) % (2 * np.pi) - np.pi
247
+ arc_length = np.abs(diff) * self.radius
248
+
249
+ # Get standard Gauss-Legendre nodes (x) and weights (w) on [-1, 1]
250
+ x, w = np.polynomial.legendre.leggauss(n_points)
251
+
252
+ # Map nodes to the angular interval [p1, p1 + diff]
253
+ # t moves from 0 to 1 as x moves from -1 to 1
254
+ t = (x + 1) / 2.0
255
+ angles = p1 + t * diff
256
+
257
+ # Scale weights: the factor 1/2 accounts for shrinking [-1, 1] to [0, 1];
258
+ # multiplying by the total arc_length then gives the integration weights.
259
+ scaled_weights = w * (arc_length / 2.0)
260
+
261
+ return angles.tolist(), scaled_weights
262
+
229
263
  def _coefficient_to_component(self, coeff: np.ndarray) -> np.ndarray:
230
264
  """Packs complex Fourier coefficients into a real component vector."""
231
265
  # For a real-valued input, the output of rfft (real FFT) has
@@ -346,6 +380,12 @@ class Lebesgue(CircleHelper, HilbertModule, AbstractInvariantLebesgueSpace):
346
380
  return False
347
381
  return True
348
382
 
383
+ def vector_sqrt(self, u: np.ndarray) -> np.ndarray:
384
+ """
385
+ Returns the pointwise square root of a function.
386
+ """
387
+ return np.sqrt(u)
388
+
349
389
  def invariant_automorphism_from_index_function(self, g: Callable[[int], float]):
350
390
  """
351
391
  Implements an invariant automorphism of the form f(Δ) using Fourier
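
A standalone NumPy sketch of the same Gauss-Legendre mapping used by geodesic_quadrature (unit radius assumed; arc_quadrature is a hypothetical stand-in, not the class method), checking the weights against an analytic arc integral:

    import numpy as np

    def arc_quadrature(p1, p2, n_points, radius=1.0):
        # Shortest signed angular separation, as in geodesic_quadrature
        diff = (p2 - p1 + np.pi) % (2 * np.pi) - np.pi
        x, w = np.polynomial.legendre.leggauss(n_points)
        angles = p1 + (x + 1) / 2.0 * diff
        weights = w * (np.abs(diff) * radius / 2.0)
        return angles, weights

    th, w = arc_quadrature(0.0, np.pi / 2, 8)
    # Integral of cos(theta) along the quarter arc (ds = R dtheta) is sin(pi/2) = 1
    assert np.isclose(np.sum(w * np.cos(th)), 1.0)
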
@@ -101,11 +101,6 @@ class SphereHelper:
101
101
  self._normalization: str = "ortho"
102
102
  self._csphase: int = 1
103
103
 
104
- # Set up sparse matrix that maps SHCoeff data arrrays into reduced form
105
- self._sparse_coeffs_to_component: coo_array = (
106
- self._coefficient_to_component_mapping()
107
- )
108
-
109
104
  def orthonormalised(self) -> bool:
110
105
  """The space is orthonormalised."""
111
106
  return True
@@ -341,6 +336,221 @@ class SphereHelper:
341
336
 
342
337
  return fig, ax, im
343
338
 
339
+ def plot_geodesic(
340
+ self,
341
+ p1: Tuple[float, float],
342
+ p2: Tuple[float, float],
343
+ ax: Optional["GeoAxes"] = None,
344
+ n_points: int = 100,
345
+ **kwargs,
346
+ ) -> Tuple["Figure", "GeoAxes"]:
347
+ """
348
+ Plots a geodesic curve onto a Cartopy map.
349
+ """
350
+ # 1. Generate points via our quadrature logic (a list of (lat, lon) pairs)
351
+ points, _ = self.geodesic_quadrature(p1, p2, n_points=n_points)
352
+ lats, lons = zip(*points)
353
+
354
+ # 2. Get/Create Axes
355
+ if ax is None:
356
+ fig, ax = plt.subplots(
357
+ figsize=kwargs.pop("figsize", (10, 8)),
358
+ subplot_kw={"projection": ccrs.PlateCarree()},
359
+ )
360
+ else:
361
+ fig = ax.get_figure()
362
+
363
+ # 3. Plot with the Geodetic transform
364
+ # This 'transform' handles the conversion to whatever projection 'ax' uses.
365
+ kwargs.setdefault("color", "black")
366
+ kwargs.setdefault("linewidth", 2)
367
+
368
+ # We use Geodetic() here because our points were generated along a great circle
369
+ ax.plot(lons, lats, transform=ccrs.Geodetic(), **kwargs)
370
+
371
+ return fig, ax
372
+
373
+ def plot_geodesic_network(
374
+ self,
375
+ paths: List[Tuple[Tuple[float, float], Tuple[float, float]]],
376
+ ax: Optional["GeoAxes"] = None,
377
+ n_points: int = 50,
378
+ **kwargs,
379
+ ) -> Tuple["Figure", "GeoAxes"]:
380
+ """
381
+ Plots a network of geodesic paths onto a Cartopy map.
382
+
383
+ This method iterates through a list of source-receiver pairs and renders
384
+ each as a great-circle arc. It is useful for visualizing the spatial
385
+ coverage of a tomographic survey.
386
+
387
+ Args:
388
+ paths: A list of ((lat1, lon1), (lat2, lon2)) tuples.
389
+ ax: An existing cartopy GeoAxes object. If None, a new figure is created.
390
+ n_points: Number of points used to render each curve. A lower value
391
+ (e.g., 50) is often sufficient for batch plotting many lines.
392
+ **kwargs: Keyword arguments passed to the underlying plot calls
393
+ (e.g., color, alpha, linewidth).
394
+
395
+ Returns:
396
+ A tuple (figure, axes) containing the plot objects.
397
+ """
398
+
399
+ # Setup/Verify Axes
400
+ if ax is None:
401
+ figsize = kwargs.pop("figsize", (12, 10))
402
+ fig, ax = plt.subplots(
403
+ figsize=figsize, subplot_kw={"projection": ccrs.PlateCarree()}
404
+ )
405
+ ax.set_global()
406
+ ax.coastlines()
407
+ else:
408
+ fig = ax.get_figure()
409
+
410
+ # Set default styling for a "network" look
411
+ # Using a lower alpha and thinner lines helps prevent clutter
412
+ # when many paths overlap.
413
+ kwargs.setdefault("color", "black")
414
+ kwargs.setdefault("linewidth", 0.8)
415
+ kwargs.setdefault("alpha", 0.5)
416
+
417
+ # Batch plot all geodesics
418
+ for p1, p2 in paths:
419
+ self.plot_geodesic(p1, p2, ax=ax, n_points=n_points, **kwargs)
420
+
421
+ # Extract unique sources and receivers for marking
422
+ sources = list(set([tuple(p[0]) for p in paths]))
423
+ receivers = list(set([tuple(p[1]) for p in paths]))
424
+
425
+ src_lats, src_lons = zip(*sources)
426
+ rec_lats, rec_lons = zip(*receivers)
427
+
428
+ # Plot Sources (Stars)
429
+ src_style = kwargs.pop("source_kwargs", {})
430
+ src_style.setdefault("marker", "*")
431
+ src_style.setdefault("color", "gold")
432
+ src_style.setdefault("s", 150)
433
+ src_style.setdefault("edgecolor", "black")
434
+ src_style.setdefault("zorder", 5) # Ensure markers are on top
435
+
436
+ ax.scatter(src_lons, src_lats, transform=ccrs.Geodetic(), **src_style)
437
+
438
+ # Plot Receivers (Dots)
439
+ rec_style = kwargs.pop("receiver_kwargs", {})
440
+ rec_style.setdefault("marker", "o")
441
+ rec_style.setdefault("color", "red")
442
+ rec_style.setdefault("s", 50)
443
+ rec_style.setdefault("edgecolor", "white")
444
+ rec_style.setdefault("zorder", 5)
445
+
446
+ ax.scatter(rec_lons, rec_lats, transform=ccrs.Geodetic(), **rec_style)
447
+
448
+ return fig, ax
449
+
450
+ def sample_power_measure(
451
+ self,
452
+ measure,
453
+ n_samples,
454
+ /,
455
+ *,
456
+ lmin=None,
457
+ lmax=None,
458
+ parallel: bool = False,
459
+ n_jobs: int = -1,
460
+ ):
461
+ """
462
+ Takes in a Gaussian measure on the space, draws n_samples from
463
+ and returns samples for the spherical harmonic power at degrees in
464
+ the indicated range.
465
+ """
466
+
467
+ lmin = 0 if lmin is None else lmin
468
+ lmax = self.lmax if lmax is None else min(self.lmax, lmax)
469
+
470
+ samples = measure.samples(n_samples, parallel=parallel, n_jobs=n_jobs)
471
+
472
+ powers = []
473
+ for u in samples:
474
+ ulm = self.to_coefficients(u)
475
+ powers.append(ulm.spectrum(lmax=lmax, convention="power")[lmin:])
476
+
477
+ return powers
478
+
479
+ def geodesic_quadrature(
480
+ self, p1: Tuple[float, float], p2: Tuple[float, float], n_points: int
481
+ ) -> Tuple[List[Tuple[float, float]], np.ndarray]:
482
+ """
483
+ Generates Gauss-Legendre quadrature points and weights along a great-circle arc.
484
+
485
+ This implementation converts the start and end latitudes and longitudes into
486
+ unit vectors, calculates the central angle (omega), and interpolates the
487
+ geodesic path using SLERP.
488
+
489
+ Args:
490
+ p1: Start point as (latitude, longitude) in degrees.
491
+ p2: End point as (latitude, longitude) in degrees.
492
+ n_points: Number of quadrature points to generate.
493
+
494
+ Returns:
495
+ points: A list of (lat, lon) tuples in degrees along the geodesic.
496
+ weights: Integration weights scaled by the total arc length (R * omega).
497
+ """
498
+
499
+ # Coordinate Transforms (Degrees -> Radians -> Unit Vectors)
500
+ def to_vector(lat, lon):
501
+ lat_rad, lon_rad = np.radians(lat), np.radians(lon)
502
+ return np.array(
503
+ [
504
+ np.cos(lat_rad) * np.cos(lon_rad),
505
+ np.cos(lat_rad) * np.sin(lon_rad),
506
+ np.sin(lat_rad),
507
+ ]
508
+ )
509
+
510
+ def to_latlon(vec):
511
+ # Normalize for numerical stability before converting back
512
+ vec = vec / np.linalg.norm(vec)
513
+ lat_rad = np.arcsin(vec[2])
514
+ lon_rad = np.arctan2(vec[1], vec[0])
515
+ return (np.degrees(lat_rad), np.degrees(lon_rad))
516
+
517
+ v1, v2 = to_vector(*p1), to_vector(*p2)
518
+
519
+ # Calculate Central Angle (omega)
520
+ dot_product = np.clip(np.dot(v1, v2), -1.0, 1.0)
521
+ omega = np.arccos(dot_product)
522
+
523
+ # Handle identical points edge case
524
+ if omega < 1e-10:
525
+ return [p1] * n_points, np.zeros(n_points)
526
+
527
+ # Handle antipodal points (non-unique path)
528
+ if np.abs(omega - np.pi) < 1e-10:
529
+ raise ValueError(
530
+ "Points are antipodal; the great circle path is not unique."
531
+ )
532
+
533
+ # Generate Gauss-Legendre Nodes and Weights
534
+ x, w = np.polynomial.legendre.leggauss(n_points)
535
+
536
+ # Map Nodes to Path Parameter t in [0, 1] and scale weights
537
+ # t = (x + 1) / 2 maps [-1, 1] to [0, 1]
538
+ # Weights are scaled by (total_arc_length / 2)
539
+ t_vals = (x + 1) / 2.0
540
+ scaled_weights = w * (self.radius * omega / 2.0)
541
+
542
+ # Spherical Linear Interpolation (SLERP) for each node
543
+ sin_omega = np.sin(omega)
544
+ points = []
545
+
546
+ for t in t_vals:
547
+ coeff1 = np.sin((1 - t) * omega) / sin_omega
548
+ coeff2 = np.sin(t * omega) / sin_omega
549
+ v_interp = coeff1 * v1 + coeff2 * v2
550
+ points.append(to_latlon(v_interp))
551
+
552
+ return points, scaled_weights
553
+
344
554
  # --------------------------------------------------------------- #
345
555
  # private methods #
346
556
  # ----------------------------------------------------------------#
@@ -378,28 +588,19 @@ class SphereHelper:
378
588
 
379
589
  def _degree_dependent_scaling_values(self, f: Callable[[int], float]) -> diags:
380
590
  """Creates a diagonal sparse matrix from a function of degree `l`."""
381
- dim = (self.lmax + 1) ** 2
382
- values = np.zeros(dim)
383
- i = 0
384
- for l in range(self.lmax + 1):
385
- j = i + l + 1
386
- values[i:j] = f(l)
387
- i = j
388
- for l in range(1, self.lmax + 1):
389
- j = i + l
390
- values[i:j] = f(l)
391
- i = j
392
- return values
591
+ ls = np.arange(self.lmax + 1)
592
+ f_vectorized = np.vectorize(f)
593
+ values = f_vectorized(ls)
594
+ counts = 2 * ls + 1
595
+ return np.repeat(values, counts)
393
596
 
394
597
  def _coefficient_to_component(self, ulm: sh.SHCoeffs) -> np.ndarray:
395
598
  """Maps spherical harmonic coefficients to a component vector."""
396
- flat_coeffs = ulm.coeffs.flatten(order="C")
397
- return self._sparse_coeffs_to_component @ flat_coeffs
599
+ return sh.shio.SHCilmToVector(ulm.coeffs)
398
600
 
399
601
  def _component_to_coefficients(self, c: np.ndarray) -> sh.SHCoeffs:
400
602
  """Maps a component vector to spherical harmonic coefficients."""
401
- flat_coeffs = self._sparse_coeffs_to_component.T @ c
402
- coeffs = flat_coeffs.reshape((2, self.lmax + 1, self.lmax + 1))
603
+ coeffs = sh.shio.SHVectorToCilm(c)
403
604
  return sh.SHCoeffs.from_array(
404
605
  coeffs, normalization=self.normalization, csphase=self.csphase
405
606
  )
@@ -475,6 +676,14 @@ class Lebesgue(SphereHelper, HilbertModule, AbstractInvariantLebesgueSpace):
475
676
  """
476
677
  return x1 * x2
477
678
 
679
+ def vector_sqrt(self, x: sh.SHGrid) -> sh.SHGrid:
680
+ """
681
+ Returns the pointwise square root of a function.
682
+ """
683
+ y = x.copy()
684
+ y.data = np.sqrt(x.data)
685
+ return y
686
+
478
687
  def __eq__(self, other: object) -> bool:
479
688
  """
480
689
  Checks for mathematical equality with another Sobolev space on a sphere.
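
The SLERP step above can be checked in isolation. A hypothetical helper (not the package's code) verifying that interpolated points remain on the unit sphere; the scaled weights in geodesic_quadrature then sum to the arc length radius * omega:

    import numpy as np

    def slerp(v1, v2, t):
        # Spherical linear interpolation between unit vectors v1 and v2
        omega = np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
        return (np.sin((1 - t) * omega) * v1 + np.sin(t * omega) * v2) / np.sin(omega)

    v1 = np.array([1.0, 0.0, 0.0])   # (lat, lon) = (0, 0)
    v2 = np.array([0.0, 0.0, 1.0])   # the north pole, (90, 0)
    for t in np.linspace(0.0, 1.0, 5):
        assert np.isclose(np.linalg.norm(slerp(v1, v2, t)), 1.0)
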
@@ -29,7 +29,7 @@ AbstractInvariantSobolevSpace
29
29
 
30
30
  from __future__ import annotations
31
31
  from abc import ABC, abstractmethod
32
- from typing import Callable, Any, List
32
+ from typing import Callable, Any, List, Tuple, Optional
33
33
 
34
34
 
35
35
  import numpy as np
@@ -120,6 +120,29 @@ class AbstractInvariantLebesgueSpace(ABC):
120
120
  g: A function that takes an eigenvalue index and returns a real value.
121
121
  """
122
122
 
123
+ @abstractmethod
124
+ def trace_of_invariant_automorphism(self, f: Callable[[float], float]) -> float:
125
+ """
126
+ Returns the trace of the automorphism of the form f(Δ) with f a function
127
+ that is well-defined on the spectrum of the Laplacian.
128
+
129
+ Args:
130
+ f: A real-valued function that is well-defined on the spectrum
131
+ of the Laplacian.
132
+ """
133
+
134
+ @abstractmethod
135
+ def geodesic_quadrature(
136
+ self, p1: Any, p2: Any, n_points: int
137
+ ) -> Tuple[List[Any], np.ndarray]:
138
+ """
139
+ Returns quadrature points and weights for a geodesic between p1 and p2.
140
+
141
+ Returns:
142
+ points: List of manifold coordinates.
143
+ weights: Integration weights scaled by the line element.
144
+ """
145
+
123
146
  def invariant_automorphism(self, f: Callable[[float], float]) -> LinearOperator:
124
147
  """
125
148
  Returns an automorphism of the form f(Δ) with f a function
@@ -143,17 +166,6 @@ class AbstractInvariantLebesgueSpace(ABC):
143
166
  lambda k: f(self.laplacian_eigenvalue(k))
144
167
  )
145
168
 
146
- @abstractmethod
147
- def trace_of_invariant_automorphism(self, f: Callable[[float], float]) -> float:
148
- """
149
- Returns the trace of the automorphism of the form f(Δ) with f a function
150
- that is well-defined on the spectrum of the Laplacian.
151
-
152
- Args:
153
- f: A real-valued function that is well-defined on the spectrum
154
- of the Laplacian.
155
- """
156
-
157
169
  def invariant_gaussian_measure(
158
170
  self,
159
171
  f: Callable[[float], float],
@@ -469,3 +481,146 @@ class AbstractInvariantSobolevSpace(AbstractInvariantLebesgueSpace):
469
481
  return self.point_value_scaled_invariant_gaussian_measure(
470
482
  lambda k: np.exp(-(scale**2) * k), amplitude
471
483
  )
484
+
485
+ def geodesic_integral(
486
+ self, p1: Any, p2: Any, n_points: Optional[int] = None
487
+ ) -> LinearForm:
488
+ """
489
+ Returns a linear functional representing the line integral of a function
490
+ along a geodesic path.
491
+
492
+ This method approximates the integral :math:`\\int_{\\gamma} u(s) ds`, where
493
+ :math:`\\gamma` is the shortest path (geodesic) connecting points `p1` and `p2`.
494
+ The integral is represented as a :class:`LinearForm` in the dual space,
495
+ constructed by summing weighted point evaluations (Dirac measures) along
496
+ the path.
497
+
498
+ For Hilbert spaces with a specified :attr:`scale`, the method can
499
+ automatically determine the required quadrature density to resolve the
500
+ smooth features of the space's sensitivity kernels.
501
+
502
+ Args:
503
+ p1 (Any): The starting point of the geodesic. The type is manifold-dependent
504
+ (e.g., float for :class:`Circle`, tuple for :class:`Sphere`).
505
+ p2 (Any): The end point of the geodesic.
506
+ n_points (int, optional): The number of Gauss-Legendre quadrature points.
507
+ If None, it is heuristically determined as:
508
+ :math:`n = \\lceil (\\text{arc\\_length} / \\text{scale}) \\times 2 \\rceil`.
509
+ This ensures at least two points per characteristic length-scale,
510
+ providing stable sampling of the sensitivity kernel. Defaults to None.
511
+
512
+ Returns:
513
+ LinearForm: A linear functional whose action on a vector `u` computes
514
+ the approximated line integral.
515
+
516
+ Raises:
517
+ NotImplementedError: If the Sobolev order :math:`s` is less than or
518
+ equal to half the spatial dimension :math:`n/2`.
519
+ """
520
+ if self.order <= self.spatial_dimension / 2:
521
+ raise NotImplementedError(
522
+ f"Order {self.order} is too low for point evaluation on a "
523
+ f"{self.spatial_dimension}D manifold."
524
+ )
525
+
526
+ # Heuristic quadrature density determination
527
+ if n_points is None:
528
+ # Perform a minimal call to determine the total arc length via weights
529
+ _, temp_weights = self.geodesic_quadrature(p1, p2, n_points=2)
530
+ arc_length = np.sum(temp_weights)
531
+
532
+ # Scale-based heuristic (Nyquist-like sampling)
533
+ n_points = int(np.ceil((arc_length / self.scale) * 2.0))
534
+ n_points = max(2, n_points)
535
+
536
+ # Retrieve final manifold-specific points and weights
537
+ points, weights = self.geodesic_quadrature(p1, p2, n_points)
538
+
539
+ # Aggregate weighted components into the dual space representation
540
+ # The components of a LinearForm represent the functional in the dual basis
541
+ total_components = np.zeros(self.dim)
542
+ for pt, weight in zip(points, weights):
543
+ # Accumulate the weighted Riesz representation of each Dirac delta
544
+ total_components += weight * self.dirac(pt).components
545
+
546
+ return LinearForm(self, components=total_components)
547
+
548
+ def geodesic_integral_representation(
549
+ self, p1: Any, p2: Any, n_points: Optional[int] = None
550
+ ) -> Any:
551
+ """
552
+ Returns the Riesz representation (sensitivity kernel) of the line integral.
553
+
554
+ This maps the LinearForm (the integral functional) back into the
555
+ primal Hilbert space. Visualizing this vector reveals the "sensitivity"
556
+ of the line integral to perturbations at different locations in the domain.
557
+
558
+ Args:
559
+ p1, p2: Start and end points of the geodesic.
560
+ n_points: Number of quadrature points.
561
+ """
562
+ # Create the functional and map it to a vector in the space
563
+ integral_form = self.geodesic_integral(p1, p2, n_points)
564
+ return self.from_dual(integral_form)
565
+
566
+ def path_average_operator(self, paths, n_points=None):
567
+ """
568
+ Constructs a tomographic operator mapping a function field to its
569
+ line integrals along a set of geodesic paths.
570
+
571
+ Note: Despite the name, this operator returns the line integral
572
+ (the dual pairing of the function with the path functional) rather
573
+ than a normalized average, unless the user manually scales the forms.
574
+ This corresponds to the 'path average' convention often used in
575
+ seismic and atmospheric tomography.
576
+
577
+ Args:
578
+ paths (List[Tuple[Any, Any]]): A list of start and end point pairs
579
+ defining the geodesics.
580
+ n_points (int, optional): The number of quadrature points per path.
581
+ If None, the heuristic based on the Sobolev scale is used.
582
+
583
+ Returns:
584
+ LinearOperator: An operator mapping Space -> EuclideanSpace(len(paths)).
585
+ The adjoint of this operator performs the 'back-projection'
586
+ mapping data residuals into the function space.
587
+ """
588
+ # Generate the set of linear functionals representing each path integral
589
+ # The integral logic is handled by the Abstract Geodesic Integral method
590
+ path_forms = [
591
+ self.geodesic_integral(p1, p2, n_points=n_points) for p1, p2 in paths
592
+ ]
593
+
594
+ # Convert the list of forms into a single LinearOperator mapping
595
+ return LinearOperator.from_linear_forms(path_forms)
596
+
597
+ def random_source_receiver_paths(
598
+ self, n_sources: int, n_receivers: int
599
+ ) -> List[Tuple[Any, Any]]:
600
+ """
601
+ Generates a list of source-receiver pairs by connecting every source to
602
+ every receiver.
603
+
604
+ This method uses the existing :meth:`random_points` logic to generate
605
+ coordinates appropriate for the specific symmetric space. For a set
606
+ of S sources and R receivers, this returns a list of S*R paths.
607
+
608
+ Args:
609
+ n_sources: The number of random source locations to generate.
610
+ n_receivers: The number of random receiver locations to generate.
611
+
612
+ Returns:
613
+ List[Tuple[Any, Any]]: A list of tuples, where each tuple contains
614
+ a (source, receiver) pair.
615
+ """
616
+ # Generate the points using the existing base class method
617
+ sources = self.random_points(n_sources)
618
+ receivers = self.random_points(n_receivers)
619
+
620
+ # Create the full-mesh network
621
+ paths = []
622
+ for src in sources:
623
+ for rec in receivers:
624
+ paths.append((src, rec))
625
+
626
+ return paths
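
Taken together, these methods support a basic tomography setup. A hedged sketch, assuming `space` is a concrete invariant Sobolev space (for example on the sphere) whose order exceeds n/2 so that the geodesic integrals are defined:

    # Full-mesh geometry: 5 sources x 10 receivers = 50 geodesic paths
    paths = space.random_source_receiver_paths(5, 10)

    # Forward operator mapping a field to its 50 path integrals
    A = space.path_average_operator(paths)

    # Sensitivity kernel of the first path, for plotting or inspection
    p1, p2 = paths[0]
    kernel = space.geodesic_integral_representation(p1, p2)
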
@@ -0,0 +1,284 @@
1
+ import math
+ import numpy as np
2
+ import numba as nb
3
+
4
+
5
+ @nb.jit(nopython=True, cache=True)
6
+ def _wigner_start_values(l, n, theta):
7
+ """
8
+ Computes the boundary values for the recursion (l == |n|).
9
+ Corresponds to WignerMinOrder/WignerMaxOrder in C++.
10
+ """
11
+ # Use log-space arithmetic for stability
12
+ # Corresponds to lines 86-105 in Wigner.h
13
+
14
+ half = 0.5
15
+ sin_half = np.sin(half * theta)
16
+ cos_half = np.cos(half * theta)
17
+
18
+ # Handle tiny angles (log stability)
19
+ # Note: In a full implementation, check for strict 0 or pi,
20
+ # but float precision usually handles this with small eps.
21
+ log_sin = np.log(sin_half) if sin_half > 1e-15 else -1e15
22
+ log_cos = np.log(cos_half) if cos_half > 1e-15 else -1e15
23
+
24
+ Fl = float(l)
25
+ Fn = float(n)
26
+
27
+ # Formula from WignerMinOrder
28
+ # exp( 0.5 * (lgamma(2l+1) - lgamma(l-n+1) - lgamma(l+n+1)) + ... )
29
+ term = np.exp(
30
+ half
31
+ * (
32
+ np.math.lgamma(2 * Fl + 1)
33
+ - np.math.lgamma(Fl - Fn + 1)
34
+ - np.math.lgamma(Fl + Fn + 1)
35
+ )
36
+ + (Fl + Fn) * log_sin
37
+ + (Fl - Fn) * log_cos
38
+ )
39
+
40
+ # Returns (min_val, max_val)
41
+ # min_val corresponds to m = -l (if n is negative logic)
42
+ # Based on WignerDetails logic, we return the value for m=-l and m=l
43
+
44
+ # Note: The C++ code handles sign flips based on n.
45
+ # We simplify for the standard case.
46
+ val_minus_l = term
47
+ val_plus_l = term * ((-1) ** (n + l)) # From MinusOneToPower in WignerMaxOrder
48
+
49
+ return val_minus_l, val_plus_l
50
+
51
+
52
+ @nb.jit(nopython=True, cache=True)
53
+ def compute_wigner_d_recursive(l_max, m_max, n, theta):
54
+ """
55
+ Direct port of GSHTrans::Wigner::Compute.
56
+ Returns a flat array of coefficients and an offset array to index it.
57
+ """
58
+
59
+ # 1. Precompute inverse square roots for integer factors
60
+ # (Matches PreCompute in Wigner.h)
61
+ size_pre = 2 * l_max + 5
62
+ sqrt_inv = np.zeros(size_pre)
63
+ sqrt_val = np.zeros(size_pre)
64
+ for i in range(1, size_pre):
65
+ sqrt_val[i] = np.sqrt(i)
66
+ sqrt_inv[i] = 1.0 / sqrt_val[i]
67
+
68
+ # 2. Calculate storage size and offsets
69
+ # We assume 'All' m-range for simplicity (m goes from -min(l, m_max) to min(l, m_max))
70
+ n_abs = abs(n)
71
+ offsets = np.zeros(l_max + 2, dtype=np.int64)
72
+ current_offset = 0
73
+
74
+ for l in range(l_max + 1):
75
+ offsets[l] = current_offset
76
+ if l >= n_abs:
77
+ effective_m_max = min(l, m_max)
78
+ # Size = (effective_m_max - (-effective_m_max)) + 1
79
+ current_offset += 2 * effective_m_max + 1
80
+
81
+ data = np.zeros(current_offset, dtype=np.float64)
82
+ cos_theta = np.cos(theta)
83
+
84
+ # 3. Main Recursion Loop
85
+ # Iterate degrees l from |n| to l_max
86
+ for l in range(n_abs, l_max + 1):
87
+
88
+ m_lim = min(l, m_max)
89
+ row_len = 2 * m_lim + 1
90
+
91
+ # Pointers to current and previous data in the flat array
92
+ ptr = offsets[l]
93
+ ptr_minus_1 = offsets[l - 1] if l > 0 else -1
94
+ ptr_minus_2 = offsets[l - 2] if l > 1 else -1
95
+
96
+ # A. Base Case: l = |n|
97
+ if l == n_abs:
98
+ val_min, val_max = _wigner_start_values(l, n, theta)
99
+
100
+ # If n is positive, we start filling from the "left" (m=-l) logic
101
+ # The C++ code separates logic for n>=0 and n<0.
102
+ # Assuming n=0 for common cases, or standard alignment:
103
+
104
+ # Fill directly. For l=|n|, usually there is only one valid starting m
105
+ # if we strictly followed the "Sector" logic, but Wigner.h fills the row.
106
+ # We will use the boundary values logic.
107
+
108
+ # Simple fill for l=|n|: usually 0 except at boundaries?
109
+ # The C++ code lines 326-338 imply it fills the whole row for l=|n|.
110
+ # But mathematically only m=-l or m=l are non-zero at the start of recursion?
111
+ # Actually, for l=n, d^n_{n,m} is computable.
112
+
113
+ # To be safe and "passably efficient", we only set the edges
114
+ # and let the loop fill (though loop is empty for size 1).
115
+ if m_lim == l: # If we have full range
116
+ if n >= 0:
117
+ data[ptr] = val_min # m = -l
118
+ data[ptr + row_len - 1] = val_max # m = +l
119
+ else:
120
+ data[ptr] = val_max # Flip logic
121
+ data[ptr + row_len - 1] = val_min
122
+
123
+ # Note: For l=|n|, intermediate m's are handled by specific logic
124
+ # or are zero? In Wigner.h line 334, it loops w/ WignerMaxUpperIndex.
125
+ # For simplicity in this port, we assume we just need the recursion seeds.
126
+
127
+ # B. One-term recursion: l = |n| + 1
128
+ elif l == n_abs + 1:
129
+ # Range of m for previous row (l-1)
130
+ m_lim_prev = min(l - 1, m_max)
131
+
132
+ # Iterate over m. The C++ code is careful about indices.
133
+ # We map m to index: index = m + m_lim
134
+
135
+ # Pre-calc coefficients
136
+ alpha_base = (2 * l - 1) * l * cos_theta * sqrt_inv[l + n_abs]
137
+ beta_base = (2 * l - 1) * sqrt_inv[l + n_abs]
138
+ if n < 0:
139
+ beta_base *= -1
140
+
141
+ # Loop over 'interior' m (those that exist in l-1)
142
+ # m goes from -m_lim_prev to m_lim_prev
143
+ for m in range(-m_lim_prev, m_lim_prev + 1):
144
+ # Indices
145
+ idx_prev = m + m_lim_prev # Index in l-1 row
146
+ idx_curr = m + m_lim # Index in l row
147
+
148
+ f1 = (alpha_base - beta_base * m) * sqrt_inv[l - m] * sqrt_inv[l + m]
149
+ data[ptr + idx_curr] = f1 * data[ptr_minus_1 + idx_prev]
150
+
151
+ # Add Boundaries (m = -l and m = +l) if they fit in m_max
152
+ if m_lim == l:
153
+ val_min, val_max = _wigner_start_values(l, n, theta)
154
+ data[ptr] = val_min # m = -l
155
+ data[ptr + row_len - 1] = val_max # m = l
156
+
157
+ # C. Two-term recursion: l > |n| + 1
158
+ else:
159
+ m_lim_prev = min(l - 1, m_max)
160
+ m_lim_prev2 = min(l - 2, m_max)
161
+
162
+ # Terms for recursion
163
+ # Matches C++ Lines 397-402
164
+ inv_l_minus_1 = 1.0 / (l - 1.0)
165
+
166
+ alpha = (2 * l - 1) * l * cos_theta * sqrt_inv[l - n] * sqrt_inv[l + n]
167
+ beta = (2 * l - 1) * n * sqrt_inv[l - n] * sqrt_inv[l + n] * inv_l_minus_1
168
+ gamma = (
169
+ l
170
+ * sqrt_val[l - 1 - n]
171
+ * sqrt_val[l - 1 + n]
172
+ * sqrt_inv[l - n]
173
+ * sqrt_inv[l + n]
174
+ * inv_l_minus_1
175
+ )
176
+
177
+ # 1. Fill Interior (where m exists in l-2)
178
+ # Range where we can use two-term: m in intersection of l-1 and l-2
179
+ m_start_2term = -m_lim_prev2
180
+ m_end_2term = m_lim_prev2
181
+
182
+ for m in range(m_start_2term, m_end_2term + 1):
183
+ idx_curr = m + m_lim
184
+ idx_prev = m + m_lim_prev
185
+ idx_prev2 = m + m_lim_prev2
186
+
187
+ denom = sqrt_inv[l - m] * sqrt_inv[l + m]
188
+ f1 = (alpha - beta * m) * denom
189
+ f2 = gamma * sqrt_val[l - 1 - m] * sqrt_val[l - 1 + m] * denom
190
+
191
+ term1 = f1 * data[ptr_minus_1 + idx_prev]
192
+ term2 = f2 * data[ptr_minus_2 + idx_prev2]
193
+
194
+ data[ptr + idx_curr] = term1 - term2
195
+
196
+ # 2. Fill Lower Gap (if m_max allows, between l-2 and l-1)
197
+ # This corresponds to "one-point recursion" logic for growing edges
198
+ # The gap is m = -(l-1). It exists in l-1 but not l-2.
199
+ if m_lim_prev > m_lim_prev2: # If l-1 has wider range than l-2
200
+ # Logic for m = -(l-1)
201
+ m = -(l - 1)
202
+ if abs(m) <= m_lim:
203
+ idx_curr = m + m_lim
204
+ idx_prev = m + m_lim_prev
205
+ # Use 1-term expansion (simplified from C++ lines 360-370)
206
+ # f1 derived from boundary conditions
207
+ f1 = (
208
+ (2 * l - 1)
209
+ * (l * (l - 1) * cos_theta - m * n)
210
+ * sqrt_inv[l - n]
211
+ * sqrt_inv[l + n]
212
+ * sqrt_inv[l - m]
213
+ * sqrt_inv[l + m]
214
+ * inv_l_minus_1
215
+ )
216
+
217
+ data[ptr + idx_curr] = f1 * data[ptr_minus_1 + idx_prev]
218
+
219
+ # Logic for m = +(l-1)
220
+ m = l - 1
221
+ if abs(m) <= m_lim:
222
+ idx_curr = m + m_lim
223
+ idx_prev = m + m_lim_prev
224
+ f1 = (
225
+ (2 * l - 1)
226
+ * (l * (l - 1) * cos_theta - m * n)
227
+ * sqrt_inv[l - n]
228
+ * sqrt_inv[l + n]
229
+ * sqrt_inv[l - m]
230
+ * sqrt_inv[l + m]
231
+ * inv_l_minus_1
232
+ )
233
+
234
+ data[ptr + idx_curr] = f1 * data[ptr_minus_1 + idx_prev]
235
+
236
+ # 3. Fill Outer Boundaries (m = -l and m = +l)
237
+ if m_lim == l:
238
+ val_min, val_max = _wigner_start_values(l, n, theta)
239
+ data[ptr] = val_min
240
+ data[ptr + row_len - 1] = val_max
241
+
242
+ # 4. Optional: Orthogonal Normalization (Matches GSHTrans::Ortho)
243
+ # Multiply by sqrt(2l+1) / sqrt(4pi) ?
244
+ # The reference C++ code multiplies by (inv_sqrt_pi / 2) * sqrt(2l+1)
245
+ # We apply this to match that output exactly.
246
+ inv_sqrt_pi = 0.5641895835477563
247
+ factor = inv_sqrt_pi / 2.0
248
+
249
+ for l in range(n_abs, l_max + 1):
250
+ norm_factor = factor * np.sqrt(2 * l + 1)
251
+ start = offsets[l]
252
+ end = start + (2 * min(l, m_max) + 1)
253
+ data[start:end] *= norm_factor
254
+
255
+ return data, offsets
256
+
257
+
258
+ class WignerRecursion:
259
+ def __init__(self, l_max, m_max, n):
260
+ self.l_max = l_max
261
+ self.m_max = m_max
262
+ self.n = n
263
+
264
+ # JIT compile immediately with dummy data to avoid lag on first real use
265
+ compute_wigner_d_recursive(1, 1, 0, 0.1)
266
+
267
+ def compute(self, theta):
268
+ """
269
+ Computes Wigner elements for angle theta.
270
+ Returns:
271
+ data (np.array): Flat array of coefficients.
272
+ offsets (np.array): Indices where each degree l starts.
273
+ """
274
+ return compute_wigner_d_recursive(self.l_max, self.m_max, self.n, theta)
275
+
276
+ def get_index(self, l, m, offsets):
277
+ """Helper to find index in flat array"""
278
+ if l < abs(self.n) or l > self.l_max:
279
+ return -1
280
+ m_lim = min(l, self.m_max)
281
+ if abs(m) > m_lim:
282
+ return -1
283
+ # Offset + (m - m_min) where m_min is -m_lim
284
+ return offsets[l] + (m + m_lim)
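
A minimal usage sketch for the new module (call pattern only; the normalisation applied above is specific to this port, so no numerical values are asserted here):

    import numpy as np
    from pygeoinf.symmetric_space.wigner import WignerRecursion

    rec = WignerRecursion(l_max=16, m_max=16, n=0)   # __init__ triggers the JIT warm-up
    data, offsets = rec.compute(np.pi / 3)

    # Element for degree l = 4 and order m = 2 at theta = pi/3
    idx = rec.get_index(4, 2, offsets)
    value = data[idx]
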
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "pygeoinf"
3
- version = "1.3.9"
3
+ version = "1.4.0"
4
4
  description = "A package for solving geophysical inference and inverse problems"
5
5
  authors = ["David Al-Attar and Dan Heathcote"]
6
6
  readme = "README.md"
@@ -18,6 +18,7 @@ joblib = "^1.5.2"
18
18
  pyshtools = { version = ">=4.0.0", optional = true }
19
19
  Cartopy = { version = "^0.23.0", optional = true }
20
20
  threadpoolctl = "^3.6.0"
21
+ numba = "^0.63.1"
21
22
 
22
23
  [tool.poetry.extras]
23
24
  sphere = ["pyshtools", "Cartopy"]
@@ -53,3 +54,5 @@ build-backend = "poetry.core.masonry.api"
53
54
  # harmonic degree, ignore the ambiguous variable name warning (E741).
54
55
  "pygeoinf/symmetric_space/sphere.py" = ["E741"]
55
56
  "pygeoinf/symmetric_space/sh_tools.py" = ["E741"]
57
+ "pygeoinf/symmetric_space/wigner.py" = ["E741"]
58
+ "pygeoinf/rough_work/*" = ["E741"]