pygeoinf 1.2.0__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,211 @@
+ """
+ Module for the solution of non-linear inverse and inference problems based on optimisation methods.
+ """
+
+ from typing import Any, Callable
+
+ import numpy as np
+ from scipy.optimize import minimize
+ from scipy.optimize import line_search as scipy_line_search
+ from scipy.sparse.linalg import LinearOperator as ScipyLinOp
+
+
+ from .hilbert_space import Vector
+ from .nonlinear_forms import NonLinearForm
+
+
+ class ScipyUnconstrainedOptimiser:
+     """
+     A wrapper for scipy.optimize.minimize that adapts a NonLinearForm.
+     """
+
+     _HESSIAN_METHODS = {
+         "Newton-CG",
+         "trust-ncg",
+         "trust-krylov",
+         "trust-exact",
+         "dogleg",
+     }
+
+     _GRADIENT_METHODS = {"BFGS", "L-BFGS-B", "CG"}
+
+     _DERIVATIVE_FREE_METHODS = {"Nelder-Mead", "Powell"}
+
+     def __init__(self, method: str, /, **kwargs: Any) -> None:
+         """
+         Args:
+             method (str): The optimization method to use (e.g., 'Newton-CG', 'BFGS').
+             **kwargs: Options to be passed to scipy.optimize.minimize (e.g., tol, maxiter).
+         """
+         self.method = method
+         self.solver_kwargs = kwargs
+
+     def minimize(self, form: NonLinearForm, x0: Vector) -> Vector:
+         """
+         Finds the minimum of a NonLinearForm starting from an initial guess.
+
+         Args:
+             form (NonLinearForm): The non-linear functional to minimize.
+             x0 (Vector): The initial guess in the Hilbert space.
+
+         Returns:
+             Vector: The vector that minimizes the form.
+         """
+         domain = form.domain
+
+         def fun(cx: np.ndarray) -> float:
+             x = domain.from_components(cx)
+             return form(x)
+
+         jac_wrapper = None
+         if form.has_gradient:
+
+             def jac_func(cx: np.ndarray) -> np.ndarray:
+                 x = domain.from_components(cx)
+                 grad_x = form.gradient(x)
+                 return domain.to_components(grad_x)
+
+             jac_wrapper = jac_func
+
+         hess_wrapper = None
+         if form.has_hessian:
+
+             def hess_func(cx: np.ndarray) -> ScipyLinOp:
+                 x = domain.from_components(cx)
+                 hessian_op = form.hessian(x)
+                 return hessian_op.matrix(galerkin=True)
+
+             hess_wrapper = hess_func
+
+         final_jac = (
+             jac_wrapper if self.method not in self._DERIVATIVE_FREE_METHODS else None
+         )
+         final_hess = hess_wrapper if self.method in self._HESSIAN_METHODS else None
+
+         options = self.solver_kwargs.copy()
+         tol = options.pop("tol", None)
+
+         if self.method in self._GRADIENT_METHODS:
+             if tol is not None and "gtol" not in options:
+                 options["gtol"] = tol
+
+         cx0 = domain.to_components(x0)
+
+         result = minimize(
+             fun=fun,
+             x0=cx0,
+             method=self.method,
+             jac=final_jac,
+             hess=final_hess,
+             tol=tol,
+             options=options,
+         )
+
+         c_final = result.x
+         return domain.from_components(c_final)
+
+
+ def line_search(
+     form: NonLinearForm,
+     xk: Vector,
+     pk: Vector,
+     gfk: Vector = None,
+     old_fval: float = None,
+     old_old_fval: float = None,
+     c1: float = 0.0001,
+     c2: float = 0.9,
+     amax: float = None,
+     extra_condition: Callable[[float, Vector, float, Vector], bool] = None,
+     maxiter: int = 10,
+ ):
+     """
+     Wrapper for the scipy line_search method for application to a non-linear form.
+
+     Args:
+         form (NonLinearForm): The non-linear functional to minimize.
+         xk (Vector): The current point.
+         pk (Vector): The search direction.
+         gfk (Vector, optional): The gradient at x=xk. If not provided, it will be recalculated.
+         old_fval (float, optional): The function value at x=xk. If not provided, it will be recalculated.
+         old_old_fval (float, optional): The function value at the point preceding x=xk.
+         c1 (float, optional): Parameter for the Armijo condition rule.
+         c2 (float, optional): Parameter for the curvature condition rule.
+         amax (float, optional): Maximum step size.
+         extra_condition (callable, optional): A callable of the form extra_condition(alpha, x, f, g) returning
+             a boolean. Arguments are the proposed step alpha and the corresponding x, f and g values. The line
+             search accepts the value of alpha only if this callable returns True. If the callable returns False
+             for the step length, the algorithm will continue with new iterates. The callable is only called for
+             iterates satisfying the strong Wolfe conditions.
+         maxiter (int, optional): Maximum number of iterations to perform.
+
+     Returns:
+         alpha (float | None): Alpha for which x_new = x0 + alpha * pk, or None if the
+             line search algorithm did not converge.
+         fc (int): Number of function evaluations made.
+         gc (int): Number of gradient evaluations made.
+         new_fval (float | None): New function value f(x_new) = f(x0 + alpha * pk), or
+             None if the line search algorithm did not converge.
+         old_fval (float): Old function value f(x0).
+         new_slope (float | None): The local slope along the search direction at
+             the new value <myfprime(x_new), pk>, or None if the line search algorithm
+             did not converge.
+
+     Raises:
+         ValueError: If the non-linear form does not have a gradient set.
+     """
+
+     if not form.has_gradient:
+         raise ValueError("NonLinearForm must provide its gradient")
+
+     domain = form.domain
+
+     # Wrap the function.
+     def f(xc: np.ndarray) -> float:
+         x = domain.from_components(xc)
+         return form(x)
+
+     # Wrap the derivative. Note that this is given in terms of the
+     # components of the derivative (i.e., an element of the dual
+     # space) and not the gradient, meaning that the standard
+     # Euclidean pairing with the components of the descent
+     # direction yields the correct slope.
+     def myfprime(c: np.ndarray) -> np.ndarray:
+         x = domain.from_components(c)
+         g = form.derivative(x)
+         return domain.dual.to_components(g)
+
+     # Convert the initial vector to components.
+     xkc = domain.to_components(xk)
+
+     # Convert the descent direction to components.
+     pkc = domain.to_components(pk)
+
+     # If the gradient is provided, convert it to its dual components.
+     gfkc = domain.to_dual(gfk).components if gfk is not None else None
+
+     # Wrap the extra condition, if provided; otherwise pass None through to scipy.
+     _extra_condition = None
+
+     if extra_condition is not None:
+
+         def _extra_condition(
+             alpha: float, xc: np.ndarray, f: float, gc: np.ndarray
+         ) -> bool:
+             x = domain.from_components(xc)
+             df = domain.dual.from_components(gc)
+             g = domain.from_dual(df)
+             return extra_condition(alpha, x, f, g)
+
+     return scipy_line_search(
+         f,
+         myfprime,
+         xkc,
+         pkc,
+         gfk=gfkc,
+         old_fval=old_fval,
+         old_old_fval=old_old_fval,
+         c1=c1,
+         c2=c2,
+         amax=amax,
+         extra_condition=_extra_condition,
+         maxiter=maxiter,
+     )
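
For orientation, a minimal usage sketch of the new module (assuming it is the pygeoinf/nonlinear_optimisation.py listed in the RECORD below; `form`, `x0` and `pk` are illustrative names for a NonLinearForm with a gradient and two vectors from its domain, not objects defined in this diff):

    from pygeoinf.nonlinear_optimisation import ScipyUnconstrainedOptimiser, line_search

    # Minimise the functional from an initial guess; kwargs are forwarded to scipy.optimize.minimize.
    optimiser = ScipyUnconstrainedOptimiser("Newton-CG", tol=1e-8)
    x_min = optimiser.minimize(form, x0)

    # Strong-Wolfe line search from x0 along the descent direction pk.
    alpha, fc, gc, new_fval, old_fval, new_slope = line_search(form, x0, pk)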
pygeoinf/parallel.py CHANGED
@@ -17,7 +17,7 @@ if TYPE_CHECKING:
      MatrixLike = Union[np.ndarray, ScipyLinOp]
 
 
- def parallel_mat_mat(A: "MatrixLike", B: np.ndarray, n_jobs: int = -1) -> np.ndarray:
+ def parallel_mat_mat(A: MatrixLike, B: np.ndarray, n_jobs: int = -1) -> np.ndarray:
      """
      Computes the matrix product A @ B in parallel by applying A to each column of B.
 
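A brief usage sketch of the function touched above (illustrative; `A` can be a dense array or a scipy LinearOperator, `B` a dense array):

    import numpy as np
    from pygeoinf.parallel import parallel_mat_mat

    A = np.random.randn(100, 50)
    B = np.random.randn(50, 8)
    C = parallel_mat_mat(A, B, n_jobs=4)  # applies A to each column of B in parallel
    assert np.allclose(C, A @ B)          # same result as the direct product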
pygeoinf/random_matrix.py CHANGED
@@ -15,14 +15,10 @@ constructing approximate matrix decompositions" (2011).
 
  from typing import Tuple, Union
 
+ import warnings
+
  import numpy as np
- from scipy.linalg import (
-     cho_factor,
-     solve_triangular,
-     eigh,
-     svd,
-     qr,
- )
+ from scipy.linalg import cho_factor, solve_triangular, eigh, svd, qr
  from scipy.sparse.linalg import LinearOperator as ScipyLinOp
 
  from .parallel import parallel_mat_mat
@@ -50,6 +46,8 @@ def fixed_rank_random_range(
          power: The number of power iterations to perform. Power iterations
              (multiplying by `A*A`) improves the accuracy of the approximation by
              amplifying the dominant singular values, but adds to the computational cost.
+         parallel: Whether to use parallel matrix multiplication.
+         n_jobs: Number of jobs for parallelism.
 
      Returns:
          An (m, rank) matrix with orthonormal columns whose span approximates
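
The power-iteration idea described in this docstring follows the standard randomized range finder of Halko et al. (2011). The body of fixed_rank_random_range is not shown in this diff, so the following is only a generic sketch of that scheme, not the package's implementation:

    import numpy as np
    from scipy.linalg import qr

    def fixed_rank_range_sketch(A, rank, power=0, rng=None):
        """Generic fixed-rank randomized range finder (Halko et al., Alg. 4.3/4.4)."""
        rng = np.random.default_rng() if rng is None else rng
        m, n = A.shape
        Y = A @ rng.standard_normal((n, rank))  # random sample of the range of A
        Q, _ = qr(Y, mode="economic")
        for _ in range(power):                  # each pass amplifies the dominant singular values
            Q, _ = qr(A.T @ Q, mode="economic")
            Q, _ = qr(A @ Q, mode="economic")
        return Q                                # (m, rank) orthonormal basis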
@@ -87,76 +85,180 @@ def fixed_rank_random_range(
 
 
  def variable_rank_random_range(
-     matrix: MatrixLike, rank: int, /, *, power: int = 0, rtol: float = 1e-6
+     matrix: MatrixLike,
+     initial_rank: int,
+     /,
+     *,
+     max_rank: int = None,
+     power: int = 0,
+     block_size: int = 10,
+     rtol: float = 1e-4,
+     parallel: bool = False,
+     n_jobs: int = -1,
  ) -> np.ndarray:
      """
-     Computes an orthonormal basis for a variable-rank approximation to the
-     range of a matrix using a randomized method.
+     Computes a variable-rank orthonormal basis using a progressive sampling algorithm.
 
-     The algorithm adaptively determines the rank required to meet a given
-     error tolerance.
+     The algorithm starts with `initial_rank` samples, checks for convergence,
+     and then progressively draws new blocks of random samples until the desired
+     tolerance `rtol` is met or `max_rank` is reached.
 
      Args:
-         matrix (matrix-like): An (m, n) matrix or LinearOperator whose range
-             is to be approximated.
-         rank (int): The maximum rank for the approximation. The algorithm
-             may return a basis with a smaller rank.
-         power (int): Exponent for power iterations. Note: This parameter is
-             reserved for future functionality and is currently unused.
-         rtol (float): The relative tolerance for the approximation error, used
-             to determine the output rank.
+         matrix: The (m, n) matrix or LinearOperator.
+         initial_rank: The number of vectors to sample initially.
+         max_rank: A hard limit on the number of basis vectors. Defaults to min(m, n).
+         power: Number of power iterations to improve accuracy on the initial sample.
+         rtol: Relative tolerance for determining the output rank.
+         block_size: The number of new vectors to sample in each iteration.
+         parallel: Whether to use parallel matrix multiplication.
+         n_jobs: Number of jobs for parallelism.
 
      Returns:
-         numpy.ndarray: An (m, k) matrix with orthonormal columns, where k <= rank.
-             Its span approximates the range of the input matrix.
+         An (m, k) matrix with orthonormal columns that approximates the matrix's
+         range to the given tolerance.
+     """
+     m, n = matrix.shape
+     if max_rank is None:
+         max_rank = min(m, n)
 
-     Notes:
-         If the input matrix is a scipy LinearOperator, it must have the
-         `matvec` method implemented.
+     # Initial Sample
+     random_matrix = np.random.randn(n, initial_rank)
+     if parallel:
+         ys = parallel_mat_mat(matrix, random_matrix, n_jobs)
+     else:
+         ys = matrix @ random_matrix
 
-         This method is based on Algorithm 4.5 in Halko et al. 2011.
-     """
+     # Power Iterations on initial sample for a better starting point
+     for _ in range(power):
+         ys, _ = qr(ys, mode="economic")
+         if parallel:
+             ys_tilde = parallel_mat_mat(matrix.T, ys, n_jobs)
+             ys = parallel_mat_mat(matrix, ys_tilde, n_jobs)
+         else:
+             ys_tilde = matrix.T @ ys
+             ys = matrix @ ys_tilde
 
-     m, n = matrix.shape
+     # Form the initial basis
+     basis_vectors, _ = qr(ys, mode="economic")
+
+     # Progressively sample and check for convergence
+     converged = False
 
-     random_vectors = [np.random.randn(n) for _ in range(rank)]
-     ys = [matrix @ x for x in random_vectors]
-     basis_vectors = []
+     # Dynamically estimate norm for tolerance calculation
+     tol = None
 
-     def projection(xs: list, y: np.ndarray) -> np.ndarray:
-         ps = [np.dot(x, y) for x in xs]
-         for p, x in zip(ps, xs):
-             y -= p * x
-         return y
+     while basis_vectors.shape[1] < max_rank:
+         # Generate a NEW block of random vectors for error checking
+         test_vectors = np.random.randn(n, block_size)
+         if parallel:
+             y_test = parallel_mat_mat(matrix, test_vectors, n_jobs)
+         else:
+             y_test = matrix @ test_vectors
 
-     norm = max(np.linalg.norm(y) for y in ys)
+         # Estimate norm for tolerance on the first pass
+         if tol is None:
+             # Estimate spectral norm from the first block of test vectors.
+             # A more stable estimate than from a single vector.
+             norm_estimate = np.linalg.norm(y_test) / np.sqrt(block_size)
+             tol = rtol * norm_estimate
 
-     tol = rtol * norm / (10 * np.sqrt(2 / np.pi))
-     error = 2 * tol
-     j = -1
-     while error > tol:
-         j += 1
+         # Project test vectors onto current basis to find the residual
+         residual = y_test - basis_vectors @ (basis_vectors.T @ y_test)
+         error = np.linalg.norm(residual, ord=2)
 
-         ys[j] = projection(basis_vectors, ys[j])
-         ys[j] /= np.linalg.norm(ys[j])
-         basis_vectors.append(ys[j])
+         # Check for convergence
+         if error < tol:
+             converged = True
+             break
 
-         y = matrix @ np.random.randn(n)
-         y = projection(basis_vectors, y)
-         ys.append(y)
+         # If not converged, add the new information to the basis
+         new_basis, _ = qr(residual, mode="economic")
 
-         for i in range(j + 1, j + rank):
-             p = np.dot(basis_vectors[j], ys[i])
-             ys[i] -= p * basis_vectors[j]
+         # Append new basis vectors, ensuring we don't exceed max_rank
+         cols_to_add = min(new_basis.shape[1], max_rank - basis_vectors.shape[1])
+         if cols_to_add <= 0:
+             break
 
-         error = max(np.linalg.norm(ys[i]) for i in range(j + 1, j + rank + 1))
+         basis_vectors = np.hstack([basis_vectors, new_basis[:, :cols_to_add]])
 
-         if j > min(n, m):
-             raise RuntimeError("Convergence has failed")
+     if not converged and basis_vectors.shape[1] >= max_rank:
+         warnings.warn(
+             f"Tolerance {rtol} not met before reaching max_rank={max_rank}. "
+             "Result may be inaccurate. Consider increasing `max_rank` or `power`.",
+             UserWarning,
+         )
 
-     qr_factor = np.column_stack(basis_vectors)
+     return basis_vectors
 
-     return qr_factor
+
+ def random_range(
+     matrix: MatrixLike,
+     size_estimate: int,
+     /,
+     *,
+     method: str = "variable",
+     max_rank: int = None,
+     power: int = 2,
+     rtol: float = 1e-4,
+     block_size: int = 10,
+     parallel: bool = False,
+     n_jobs: int = -1,
+ ) -> np.ndarray:
+     """
+     A unified wrapper for randomized range finding algorithms.
+
+     Args:
+         matrix: The (m, n) matrix or LinearOperator to analyze.
+         size_estimate: For 'fixed' method, the exact target rank. For 'variable'
+             method, this is the initial rank to sample.
+         method ({'variable', 'fixed'}): The algorithm to use.
+             - 'variable': (Default) Progressively samples to find the rank needed
+               to meet tolerance `rtol`, stopping at `max_rank`.
+             - 'fixed': Returns a basis with exactly `size_estimate` columns.
+         max_rank: For 'variable' method, a hard limit on the rank. Ignored if
+             method='fixed'. Defaults to min(m, n).
+         power: Number of power iterations to improve accuracy.
+         rtol: Relative tolerance for the 'variable' method. Ignored if
+             method='fixed'.
+         block_size: Number of new vectors to sample per iteration in 'variable'
+             method. Ignored if method='fixed'.
+         parallel: Whether to use parallel matrix multiplication.
+         n_jobs: Number of jobs for parallelism.
+
+     Returns:
+         An (m, k) orthonormal matrix approximating the input matrix's range.
+
+     Raises:
+         ValueError: If an unknown method is specified.
+     """
+     if method == "variable":
+         return variable_rank_random_range(
+             matrix,
+             size_estimate,
+             max_rank=max_rank,
+             power=power,
+             block_size=block_size,
+             rtol=rtol,
+             parallel=parallel,
+             n_jobs=n_jobs,
+         )
+     elif method == "fixed":
+         if any([rtol != 1e-4, block_size != 10, max_rank is not None]):
+             warnings.warn(
+                 "'rtol', 'block_size', and 'max_rank' are ignored when method='fixed'.",
+                 UserWarning,
+             )
+         return fixed_rank_random_range(
+             matrix,
+             rank=size_estimate,
+             power=power,
+             parallel=parallel,
+             n_jobs=n_jobs,
+         )
+     else:
+         raise ValueError(
+             f"Unknown method '{method}'. Choose from 'fixed' or 'variable'."
+         )
 
 
  def random_svd(
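
A short usage sketch of the new random_range wrapper (illustrative; `A` is any (m, n) ndarray or LinearOperator):

    import numpy as np
    from pygeoinf.random_matrix import random_range

    A = np.random.randn(500, 400)

    # Adaptive: start from 20 samples and grow in blocks until rtol is met or max_rank is hit.
    Q = random_range(A, 20, method="variable", rtol=1e-6)

    # Fixed: return a basis with exactly 50 orthonormal columns.
    Q_fixed = random_range(A, 50, method="fixed", power=2)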
@@ -220,28 +322,66 @@ def random_eig(
      return qr_factor @ eigenvectors, eigenvalues
 
 
- def random_cholesky(matrix: MatrixLike, qr_factor: np.ndarray) -> np.ndarray:
+ def random_cholesky(
+     matrix: MatrixLike, qr_factor: np.ndarray, *, rtol: float = 1e-12
+ ) -> np.ndarray:
      """
-     Computes an approximate Cholesky factorisation for a symmetric positive-
-     definite matrix from a low-rank range approximation.
+     Computes a robust approximate Cholesky factorization using a fallback strategy.
+
+     It first attempts a direct Cholesky factorization. If that fails, it falls
+     back to a method based on eigendecomposition.
 
      Args:
-         matrix (matrix-like): The original symmetric positive-definite (n, n)
-             matrix or LinearOperator.
+         matrix (matrix-like): The original symmetric (n, n) matrix.
          qr_factor (numpy.ndarray): An (n, k) orthonormal basis for the
              approximate range of the matrix.
+         rtol (float, optional): A relative tolerance used in the fallback path.
+             Any eigenvalue `s` such that `s < rtol * max(eigenvalues)` will be
+             treated as zero. Defaults to 1e-12.
 
      Returns:
          numpy.ndarray: The approximate Cholesky factor F, such that A ~= F @ F.T.
-
-     Notes:
-         Based on Algorithm 5.5 of Halko et al. 2011.
      """
-     small_matrix_1 = matrix @ qr_factor
-     small_matrix_2 = qr_factor.T @ small_matrix_1
-     factor, lower = cho_factor(small_matrix_2, overwrite_a=True)
-     identity_operator = np.identity(factor.shape[0])
-     inverse_factor = solve_triangular(
-         factor, identity_operator, overwrite_b=True, lower=lower
-     )
-     return small_matrix_1 @ inverse_factor
+     try:
+         # --- Fast Path: Try direct Cholesky factorization ---
+         small_matrix_1 = matrix @ qr_factor
+         small_matrix_2 = qr_factor.T @ small_matrix_1
+
+         factor, lower = cho_factor(small_matrix_2, overwrite_a=True)
+
+         identity_operator = np.identity(factor.shape[0])
+         inverse_factor = solve_triangular(
+             factor, identity_operator, overwrite_b=True, lower=lower
+         )
+         return small_matrix_1 @ inverse_factor
+
+     except np.linalg.LinAlgError:
+
+         # --- Fallback Path: Eigendecomposition ---
+         small_matrix = qr_factor.T @ (matrix @ qr_factor)
+         eigenvalues, eigenvectors = eigh(small_matrix, overwrite_a=True)
+
+         # Determine the threshold based on the largest eigenvalue.
+         # eigh returns eigenvalues in ascending order.
+         max_eigenvalue = eigenvalues[-1]
+
+         if max_eigenvalue > 0:
+             threshold = rtol * max_eigenvalue
+         else:
+             # If all eigenvalues are non-positive, all will be set to zero.
+             threshold = 0
+
+         # Apply the threshold to create safe eigenvalues.
+         safe_eigenvalues = eigenvalues.copy()
+         safe_eigenvalues[eigenvalues < threshold] = 0.0
+
+         y_matrix = matrix @ qr_factor
+         temp_factor = y_matrix @ eigenvectors
+
+         # Conditionally compute the inverse square root.
+         sqrt_s = np.sqrt(safe_eigenvalues)
+         sqrt_s_inv = np.where(sqrt_s > 0, np.reciprocal(sqrt_s), 0.0)
+
+         cholesky_factor = temp_factor * sqrt_s_inv
+
+         return cholesky_factor
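
A quick sanity check for the factor returned by the new fallback-aware random_cholesky (illustrative; `A` is a symmetric positive semi-definite ndarray):

    from pygeoinf.random_matrix import random_range, random_cholesky

    Q = random_range(A, 20, method="variable")
    F = random_cholesky(A, Q)
    # F has shape (n, k); A ≈ F @ F.T holds to the accuracy with which Q captures the range of A.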
@@ -24,7 +24,7 @@ Key Classes
 
  from __future__ import annotations
 
- from typing import Callable, Tuple, Optional
+ from typing import Callable, Tuple, Optional, Any
  import matplotlib.pyplot as plt
  import numpy as np
  from scipy.fft import rfft, irfft
@@ -38,7 +38,7 @@ from pygeoinf.hilbert_space import (
      HilbertModule,
      MassWeightedHilbertModule,
  )
- from pygeoinf.operators import LinearOperator
+ from pygeoinf.linear_operators import LinearOperator
  from pygeoinf.linear_forms import LinearForm
  from .symmetric_space import (
      AbstractInvariantLebesgueSpace,
@@ -336,6 +336,16 @@ class Lebesgue(CircleHelper, HilbertModule, AbstractInvariantLebesgueSpace):
 
          return self.kmax == other.kmax and self.radius == other.radius
 
+     def is_element(self, u: Any) -> bool:
+         """
+         Checks if an object is a valid element of the space.
+         """
+         if not isinstance(u, np.ndarray):
+             return False
+         if not u.shape == (self.dim,):
+             return False
+         return True
+
      def invariant_automorphism_from_index_function(self, g: Callable[[int], float]):
          """
          Implements an invariant automorphism of the form f(Δ) using Fourier
@@ -38,7 +38,7 @@ from pygeoinf.hilbert_space import (
      HilbertModule,
      MassWeightedHilbertModule,
  )
- from pygeoinf.operators import LinearOperator
+ from pygeoinf.linear_operators import LinearOperator
  from pygeoinf.linear_forms import LinearForm
  from .symmetric_space import (
      AbstractInvariantLebesgueSpace,
@@ -448,6 +448,20 @@ class Lebesgue(SphereHelper, HilbertModule, AbstractInvariantLebesgueSpace):
 
          return self.lmax == other.lmax and self.radius == other.radius
 
+     def is_element(self, x: Any) -> bool:
+         """
+         Checks if an object is a valid element of the space.
+         """
+         if not isinstance(x, sh.SHGrid):
+             return False
+         if not x.lmax == self.lmax:
+             return False
+         if not x.grid == self._grid_name():
+             return False
+         if not x.extend == self.extend:
+             return False
+         return True
+
      def eigenfunction_norms(self) -> np.ndarray:
          """Returns a list of the norms of the eigenfunctions."""
          return np.fromiter(
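
A small illustration of the new is_element checks on the circle space (illustrative; `space` is assumed to be an instance of the circle Lebesgue class patched above, and its construction is not shown in this diff):

    import numpy as np

    space.is_element(np.zeros(space.dim))      # True: ndarray with shape (space.dim,)
    space.is_element(np.zeros(space.dim + 1))  # False: wrong shape
    space.is_element([0.0] * space.dim)        # False: not a numpy array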
@@ -36,7 +36,7 @@ import numpy as np
  from scipy.sparse import diags
 
  from pygeoinf.hilbert_space import EuclideanSpace, HilbertSpace
- from pygeoinf.operators import LinearOperator
+ from pygeoinf.linear_operators import LinearOperator
  from pygeoinf.linear_forms import LinearForm
  from pygeoinf.gaussian_measure import GaussianMeasure
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: pygeoinf
- Version: 1.2.0
+ Version: 1.2.2
  Summary: A package for solving geophysical inference and inverse problems
  License: BSD-3-Clause
  Author: David Al-Attar and Dan Heathcote
@@ -0,0 +1,25 @@
+ pygeoinf/__init__.py,sha256=LG49pW8RUK-NxO0KSEHNVG_tBClUK5GFLtjvNLDRnjE,1419
+ pygeoinf/backus_gilbert.py,sha256=bXJ0JKh49elNKLm5cGJj_RBh0oXcH3hpR7U-QUFHj8M,3657
+ pygeoinf/direct_sum.py,sha256=SuW4OJuMjGme5nNhYTzcrTyo957g0OvNCC3GpQue5Bc,19419
+ pygeoinf/forward_problem.py,sha256=iQsTQ4CV4XAqWd48EzhA82NMySGJSQ0_PaEtfG40agw,10529
+ pygeoinf/gaussian_measure.py,sha256=EOUyBYT-K9u2ZD_uwPXDv17BJHk-L0RM55jfIR-DmXY,24020
+ pygeoinf/hilbert_space.py,sha256=StS2AoTnOFTrh3XRyZ6K9lhQDqJijDaJGMC8RRagoTQ,25247
+ pygeoinf/inversion.py,sha256=3FiujTK4PDBPjS0aYdo02nHQjsVFL4GDqv4gvg2YilA,6189
+ pygeoinf/linear_bayesian.py,sha256=L1cJkeHtba4fPXZ8CmiLRBtuG2fmzG228M_iEar-iP8,9643
+ pygeoinf/linear_forms.py,sha256=sgynBvlQ35CaH12PKU2vWPHh9ikrmQbD5IASCUQtlbw,9197
+ pygeoinf/linear_operators.py,sha256=ha6QHKHVBd_MLMNmk8zAoqm_yDM2dClb8C6p13jo7Ik,36333
+ pygeoinf/linear_optimisation.py,sha256=sO155SkGg5H1RR-jmULru7R4vlCPjUce--6Z52l3Pks,11147
+ pygeoinf/linear_solvers.py,sha256=fPcr4f2mhSK34cHdRXk9LsonQJ_gLhXQYwCYA4O6Jv4,15706
+ pygeoinf/nonlinear_forms.py,sha256=eQudA-HfedbURvRmzVvU8HfNCxHTuWUpdDoWe_KlA4Y,7067
+ pygeoinf/nonlinear_operators.py,sha256=1FvimPwMxt0h1qOvTTjGabm-2ctDO4bT71LLro-7t68,7069
+ pygeoinf/nonlinear_optimisation.py,sha256=xcIJX6Uw6HuJ3OySGXm3cDQ-BVgIVi3jjtOpIHNq8ks,7074
+ pygeoinf/parallel.py,sha256=VVFvNHszy4wSa9LuErIsch4NAkLaZezhdN9YpRROBJo,2267
+ pygeoinf/random_matrix.py,sha256=afEUFuoVbkFobhC9Jy9SuGb4Yib-fn3pQyiWUqXrA-8,13629
+ pygeoinf/symmetric_space/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pygeoinf/symmetric_space/circle.py,sha256=7Bz9BfSkbDnoz5-HFwTsAQE4a09jUapBePwoCK0xYWw,18007
+ pygeoinf/symmetric_space/sphere.py,sha256=poasBQEXV5WNSA9LBuCY2lsxv79aV90jKP13FSoQUmU,21950
+ pygeoinf/symmetric_space/symmetric_space.py,sha256=Q3KtfCtHO0_8LjsdKtH-5WVhRQurt5Bdk4yx1D2F5YY,17977
+ pygeoinf-1.2.2.dist-info/LICENSE,sha256=GrTQnKJemVi69FSbHprq60KN0OJGsOSR-joQoTq-oD8,1501
+ pygeoinf-1.2.2.dist-info/METADATA,sha256=avOFENnp8CogJepyHc0BfcyN7wq2PHtpXYCrE0KscQ0,15363
+ pygeoinf-1.2.2.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+ pygeoinf-1.2.2.dist-info/RECORD,,