sparse-ir 1.1.6__py3-none-any.whl → 2.0.0a2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
sparse_ir/sampling.py CHANGED
@@ -1,157 +1,168 @@
- # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
- # SPDX-License-Identifier: MIT
- import numpy as np
- from warnings import warn
-
- from . import _util
-
-
- class AbstractSampling:
-     """Base class for sparse sampling.
+ """
+ High-level Python classes for sparse sampling
+ """

-     Encodes the "basis transformation" of a propagator from the truncated IR
-     basis coefficients ``G_ir[l]`` to time/frequency sampled on sparse points
-     ``G(x[i])`` together with its inverse, a least squares fit::
+ import numpy as np
+ from ctypes import POINTER, c_double, c_int, byref
+ from pylibsparseir.core import c_double_complex, tau_sampling_new, tau_sampling_new_with_matrix, matsubara_sampling_new, matsubara_sampling_new_with_matrix, _lib
+ from pylibsparseir.constants import COMPUTATION_SUCCESS, SPIR_ORDER_ROW_MAJOR
+ from . import augment

-              ________________                   ___________________
-             |                |    evaluate     |                   |
-             |     Basis      |---------------->|     Value on      |
-             |  coefficients  |<----------------|  sampling points  |
-             |________________|      fit        |___________________|
+ class TauSampling:
+     """Sparse sampling in imaginary time.

+     Allows the transformation between the IR basis and a set of sampling points
+     in (scaled/unscaled) imaginary time.
      """
-     def evaluate(self, al, axis=None, *, points=None):
-         """Evaluate the basis coefficients at sampling points.
-
-         Arguments:
-             al (array):
-                 Array where the `l`-th item along `axis` corresponds to the
-                 `l`-th basis coefficient
-             axis (integer):
-                 Axis or dimension of `al` along which to evaluate the function.
-                 Defaults to the last, i.e., rightmost axis.
-             points (vector):
-                 Points on which the results should be evaluated. Defaults
-                 to the sampling points for which the sampling objects was
-                 created.
-
-             .. versionadded:: 1.1
-
-         Return:
-             Array where the `n`-th item along `axis` corresponds to the
-             value on the `n`-th sampling point (or value on `point[n]`, if
-             given.)
-
-         Note:
-             If `points` is given, a new sampling is created at each invocation,
-             which can result in a performance hit. Consider caching sampling
-             objects or simply using the `.u()` and `.uhat()` methods of the
-             underlying basis.
+
+     def __init__(self, basis, sampling_points=None, use_positive_taus=False):
          """
-         if points is not None:
-             return self._for_sampling_points(points).evaluate(al, axis)
-
-         return self.matrix.matmul(al, axis)
-
-     def fit(self, ax, axis=None, *, points=None):
-         """Fit the basis coefficients from the sampling points.
-
-         Arguments:
-             ax (array):
-                 Array where the `n`-th item along `axis` corresponds to the
-                 value on the `n`-th sampling point (or value on `point[n]`, if
-                 given.)
-             axis (integer):
-                 Axis or dimension of `ax` along which to fit the function.
-                 Defaults to the last, i.e., rightmost axis.
-             points (vector):
-                 Points on which the `ax` is given. Defaults to the sampling
-                 points for which the sampling objects was created.
-
-             .. versionadded:: 1.1
-
-         Return:
-             Array where the `l`-th item along `axis` corresponds to the
-             `l`-th basis coefficient
-
-         Note:
-             If `points` is given, a new sampling is created at each invocation,
-             which can result in a performance hit. Consider caching sampling
-             objects.
+         Initialize tau sampling.
+
+         Parameters:
+         -----------
+         basis : FiniteTempBasis
+             Finite temperature basis
+         sampling_points : array_like, optional
+             Tau sampling points. If None, use default.
+         use_positive_taus : bool, optional
+             If `use_positive_taus=False`, the sampling points are within
+             the range [-β/2, β/2] and the distribution is symmetric.
+             If `use_positive_taus=True`, the sampling points are
+             folded to the positive tau domain [0, β), which is
+             the default behavior of sparseir 1.x.x.
          """
-         if points is not None:
-             return self._for_sampling_points(points).fit(ax, axis)
-
-         matrix = self.matrix
-         if self.basis.is_well_conditioned and not (matrix.cond <= 1e8):
-             warn(f"Sampling matrix is poorly conditioned "
-                  f"(kappa = {matrix.cond:.2g})", ConditioningWarning)
-
-         return matrix.lstsq(ax, axis)
-
-     @property
-     def cond(self):
-         """Condition number of the fitting problem"""
-         return self.matrix.cond
+         self.basis = basis

-     @property
-     def sampling_points(self):
-         """Set of sampling points"""
-         raise NotImplementedError()
+         if sampling_points is None:
+             self.sampling_points = basis.default_tau_sampling_points()
+             if use_positive_taus:
+                 self.sampling_points = np.mod(self.sampling_points, basis.beta)
+         else:
+             self.sampling_points = np.asarray(sampling_points, dtype=np.float64)

-     @property
-     def matrix(self):
-         """Evaluation matrix is decomposed form"""
-         raise NotImplementedError()
+         self.sampling_points = np.sort(self.sampling_points)
+         if isinstance(basis, augment.AugmentedBasis):
+             # Create sampling object
+             matrix = basis.u(self.sampling_points).T
+             self._ptr = tau_sampling_new_with_matrix(basis, basis.statistics, self.sampling_points, matrix)
+         else:
+             # Create sampling object
+             self._ptr = tau_sampling_new(basis._ptr, self.sampling_points)

      @property
-     def basis(self):
-         """Basis instance"""
-         raise NotImplementedError()
+     def tau(self):
+         """Tau sampling points."""
+         return self.sampling_points

-     def _for_sampling_points(self, x):
-         raise RuntimeError("Changing sampling points is not possible")
+     def evaluate(self, al, axis=0):
+         """
+         Transform basis coefficients to sampling points.
+
+         Parameters:
+         -----------
+         al : array_like
+             Basis coefficients
+         axis : int, optional
+             Axis along which to transform
+
+         Returns:
+         --------
+         ndarray
+             Values at sampling points
+         """
+         al = np.ascontiguousarray(al)
+         output_dims = list(al.shape)
+         ndim = len(output_dims)
+         input_dims = np.asarray(al.shape, dtype=np.int32)
+         output_dims[axis] = len(self.sampling_points)
+         if al.dtype.kind == "f":
+             output = np.zeros(output_dims, dtype=np.float64)
+
+             status = _lib.spir_sampling_eval_dd(
+                 self._ptr,
+                 SPIR_ORDER_ROW_MAJOR,
+                 ndim,
+                 input_dims.ctypes.data_as(POINTER(c_int)),
+                 axis,
+                 al.ctypes.data_as(POINTER(c_double)),
+                 output.ctypes.data_as(POINTER(c_double))
+             )
+         elif al.dtype.kind == "c":
+             output = np.zeros(output_dims, dtype=c_double_complex)
+
+             status = _lib.spir_sampling_eval_zz(
+                 self._ptr,
+                 SPIR_ORDER_ROW_MAJOR,
+                 ndim,
+                 input_dims.ctypes.data_as(POINTER(c_int)),
+                 axis,
+                 al.ctypes.data_as(POINTER(c_double_complex)),
+                 output.ctypes.data_as(POINTER(c_double_complex))
+             )
+             output = output['real'] + 1j * output['imag']
+         else:
+             raise ValueError(f"Unsupported dtype: {al.dtype}")

+         if status != COMPUTATION_SUCCESS:
+             raise RuntimeError(f"Failed to evaluate sampling: {status}")

- class TauSampling(AbstractSampling):
-     """Sparse sampling in imaginary time.
+         return output

-     Allows the transformation between the IR basis and a set of sampling points
-     in (scaled/unscaled) imaginary time.
-     """
-     def __init__(self, basis, sampling_points=None):
-         if sampling_points is None:
-             sampling_points = basis.default_tau_sampling_points()
+     def fit(self, ax, axis=0):
+         """
+         Fit basis coefficients from sampling point values.
+         """
+         ax = np.ascontiguousarray(ax)
+         ndim = len(ax.shape)
+         input_dims = np.asarray(ax.shape, dtype=np.int32)
+         output_dims = list(ax.shape)
+         output_dims[axis] = self.basis.size
+         if ax.dtype.kind == "f":
+             output = np.zeros(output_dims, dtype=np.float64)
+             status = _lib.spir_sampling_fit_dd(
+                 self._ptr,
+                 SPIR_ORDER_ROW_MAJOR,
+                 ndim,
+                 input_dims.ctypes.data_as(POINTER(c_int)),
+                 axis,
+                 ax.ctypes.data_as(POINTER(c_double)),
+                 output.ctypes.data_as(POINTER(c_double))
+             )
+         elif ax.dtype.kind == "c":
+             output = np.zeros(output_dims, dtype=c_double_complex)
+             status = _lib.spir_sampling_fit_zz(
+                 self._ptr,
+                 SPIR_ORDER_ROW_MAJOR,
+                 ndim,
+                 input_dims.ctypes.data_as(POINTER(c_int)),
+                 axis,
+                 ax.ctypes.data_as(POINTER(c_double_complex)),
+                 output.ctypes.data_as(POINTER(c_double_complex))
+             )
+             output = output['real'] + 1j * output['imag']
          else:
-             sampling_points = np.asarray(sampling_points)
-             if sampling_points.ndim != 1:
-                 raise ValueError("sampling points must be vector")
+             raise ValueError(f"Unsupported dtype: {ax.dtype}")

-         matrix = basis.u(sampling_points).T
-         self._basis = basis
-         self._sampling_points = sampling_points
-         self._matrix = DecomposedMatrix(matrix)
+         if status != COMPUTATION_SUCCESS:
+             raise RuntimeError(f"Failed to fit sampling: {status}")

-     @property
-     def basis(self): return self._basis
-
-     @property
-     def sampling_points(self): return self._sampling_points
+         return output

      @property
-     def matrix(self): return self._matrix
-
-     @property
-     def tau(self):
-         """Sampling points in (reduced) imaginary time"""
-         return self._sampling_points
+     def cond(self):
+         """Condition number of the sampling matrix."""
+         cond = c_double()
+         status = _lib.spir_sampling_get_cond_num(self._ptr, byref(cond))
+         if status != COMPUTATION_SUCCESS:
+             raise RuntimeError(f"Failed to get condition number: {status}")
+         return cond.value

-     def _for_sampling_points(self, x):
-         x = np.asarray(x)
-         return TauSampling(self._basis, x)
+     def __repr__(self):
+         return f"TauSampling(n_points={len(self.sampling_points)})"


- class MatsubaraSampling(AbstractSampling):
+ class MatsubaraSampling:
      """Sparse sampling in Matsubara frequencies.

      Allows the transformation between the IR basis and a set of sampling points
@@ -166,256 +177,134 @@ class MatsubaraSampling(AbstractSampling):
      case, sparse sampling is performed over non-negative frequencies only,
      cutting away half of the necessary sampling space.
      """
-     def __init__(self, basis, sampling_points=None, *, positive_only=False):
+
+     def __init__(self, basis, sampling_points=None, positive_only=False):
+         """
+         Initialize Matsubara sampling.
+
+         Parameters:
+         -----------
+         basis : FiniteTempBasis
+             Finite temperature basis
+         sampling_points : array_like, optional
+             Matsubara frequency indices. If None, use default.
+         positive_only : bool, optional
+             If True, use only positive frequencies
+         """
+         self.basis = basis
+         self.positive_only = positive_only
+
          if sampling_points is None:
-             sampling_points = basis.default_matsubara_sampling_points(
-                 positive_only=positive_only)
+             self.sampling_points = basis.default_matsubara_sampling_points(positive_only=positive_only)
          else:
-             sampling_points = _util.check_reduced_matsubara(sampling_points)
-             if sampling_points.ndim != 1:
-                 raise ValueError("sampling points must be vector")
-             sampling_points = np.sort(sampling_points)
-             if positive_only and not sampling_points[0] >= 0:
-                 raise ValueError("invalid negative sampling frequencies")
-
-         matrix = basis.uhat(sampling_points).T
-         self._basis = basis
-         self._sampling_points = sampling_points
-         self._positive_only = positive_only
-
-         if positive_only:
-             ssvd_result = _split_complex(matrix, sampling_points[0] == 0)
-             self._matrix = SplitDecomposedMatrix(matrix, ssvd_result)
+             self.sampling_points = np.asarray(sampling_points, dtype=np.int64)
+
+         if isinstance(basis, augment.AugmentedBasis):
+             # Create sampling object
+             matrix = basis.uhat(self.sampling_points).T
+             matrix = np.ascontiguousarray(matrix, dtype=np.complex128)
+
+             self._ptr = matsubara_sampling_new_with_matrix(
+                 basis.statistics,
+                 basis.size,
+                 positive_only,
+                 self.sampling_points,
+                 matrix
+             )
          else:
-             self._matrix = DecomposedMatrix(matrix)
-
-     @property
-     def basis(self): return self._basis
-
-     @property
-     def sampling_points(self): return self._sampling_points
-
-     @property
-     def matrix(self): return self._matrix
-
-     @property
-     def positive_only(self):
-         """Sampling is performed only on non-negative sampling frequencies"""
-         return self._positive_only
+             # Create sampling object
+             self._ptr = matsubara_sampling_new(basis._ptr, positive_only, self.sampling_points)

      @property
      def wn(self):
-         """Sampling points as (reduced) Matsubara frequencies"""
-         return self._sampling_points
-
-     def _for_sampling_points(self, x):
-         x = np.asarray(x)
-         return MatsubaraSampling(self._basis, x,
-                                  positive_only=self._positive_only)
-
+         """Matsubara frequency indices."""
+         return self.sampling_points

- class DecomposedMatrix:
-     """Matrix in SVD decomposed form for fast and accurate fitting.
-
-     Stores a matrix ``A`` together with its thin SVD form::
-
-         A == (u * s) @ vH.
-
-     This allows for fast and accurate least squares fits using ``A.lstsq(x)``.
-     """
-     def __init__(self, a, svd_result=None):
-         a = np.asarray(a)
-         if a.ndim != 2:
-             raise ValueError("a must be of matrix form")
-         if svd_result is None:
-             u, s, vH = np.linalg.svd(a, full_matrices=False)
+     def evaluate(self, al, axis=0):
+         """
+         Transform basis coefficients to sampling points.
+
+         Parameters:
+         -----------
+         al : array_like
+             Basis coefficients
+         axis : int, optional
+             Axis along which to transform
+
+         Returns:
+         --------
+         ndarray
+             Values at Matsubara frequencies (complex)
+         """
+         # For better numerical stability, we need to make the input array contiguous.
+         al = np.ascontiguousarray(al)
+         output_dims = list(al.shape)
+         ndim = len(output_dims)
+         input_dims = np.asarray(al.shape, dtype=np.int32)
+         output_dims[axis] = len(self.sampling_points)
+         output_cdouble_complex = np.zeros(output_dims, dtype=c_double_complex)
+         if al.dtype.kind == "f":
+             status = _lib.spir_sampling_eval_dz(
+                 self._ptr,
+                 SPIR_ORDER_ROW_MAJOR,
+                 ndim,
+                 input_dims.ctypes.data_as(POINTER(c_int)),
+                 axis,
+                 al.ctypes.data_as(POINTER(c_double)),
+                 output_cdouble_complex.ctypes.data_as(POINTER(c_double_complex))
+             )
+             output = output_cdouble_complex['real'] + 1j * output_cdouble_complex['imag']
+         elif al.dtype.kind == "c":
+             status = _lib.spir_sampling_eval_zz(
+                 self._ptr,
+                 SPIR_ORDER_ROW_MAJOR,
+                 ndim,
+                 input_dims.ctypes.data_as(POINTER(c_int)),
+                 axis,
+                 al.ctypes.data_as(POINTER(c_double_complex)),
+                 output_cdouble_complex.ctypes.data_as(POINTER(c_double_complex))
+             )
+             output = output_cdouble_complex['real'] + 1j * output_cdouble_complex['imag']
          else:
-             u, s, vH = _util.check_svd_result(svd_result, a.shape)
-
-         # Remove singular values which are exactly zero
-         where = s.astype(bool)
-         if not where.all():
-             u, s, vH = u[:, where], s[where], vH[where]
-
-         self._a = a
-         self._uH = np.array(u.conj().T)
-         self._s = s
-         self._v = np.array(vH.conj().T)
-
-     def __matmul__(self, x):
-         """Matrix-matrix multiplication."""
-         return self._a @ x
-
-     def matmul(self, x, axis=None):
-         """Compute ``A @ x`` (optionally along specified axis of x)"""
-         return _matop_along_axis(self._a.__matmul__, x, axis)
-
-     def _lstsq(self, x):
-         r = self._uH @ x
-         r = r / (self._s[:, None] if r.ndim > 1 else self._s)
-         return self._v @ r
-
-     def lstsq(self, x, axis=None):
-         """Return ``y`` such that ``np.linalg.norm(A @ y - x)`` is minimal"""
-         return _matop_along_axis(self._lstsq, x, axis)
-
-     def __array__(self, dtype=""):
-         """Convert to numpy array."""
-         return self._a if dtype == "" else self._a.astype(dtype)
-
-     @property
-     def a(self):
-         """Full matrix"""
-         return self._a
-
-     @property
-     def u(self):
-         """Left singular vectors, aranged column-wise"""
-         return self._uH.conj().T
-
-     @property
-     def s(self):
-         """Most significant, nonzero singular values"""
-         return self._s
-
-     @property
-     def vH(self):
-         """Right singular vectors, transposed"""
-         return self._v.conj().T
-
-     @property
-     def cond(self):
-         """Condition number of matrix"""
-         return self._s[0] / self._s[-1]
-
-
- class SplitDecomposedMatrix:
-     """Matrix in "split" SVD decomposed form for fast and accurate fitting.
+             raise ValueError(f"Unsupported dtype: {al.dtype}")

-     Stores a matrix ``A`` together with its "split SVD" form::
+         if status != COMPUTATION_SUCCESS:
+             raise RuntimeError(f"Failed to evaluate sampling: {status}")

-         A == u * s @ vT
+         return output

-     where `vT` is a real matrix and `u` is a complex matrix. The "split" SVD
-     form differs from the SVD in that the least squares fit has to be
-     constructed as follows:
-
-         fit(A, x) == vT.T / s @ (u.conj().T * x).real
-
-     This again allows for fast and accurate least squares fits using
-     ``A.lstsq(x)``. This is useful in the case where.
-     """
-     def __init__(self, a, ssvd_result):
-         a = np.asarray(a)
-         if a.ndim != 2:
-             raise ValueError("a must be of matrix form")
-
-         u, s, vT = _util.check_svd_result(ssvd_result, a.shape)
-         if np.issubdtype(vT.dtype, np.complexfloating):
-             raise ValueError("Split SVD part vT shall be a real matrix")
-         if not np.issubdtype(u.dtype, np.complexfloating):
-             raise ValueError("Split SVD part u shall be a complex matrix")
-
-         # Remove singular values which are exactly zero
-         where = s.astype(bool)
-         if not where.all():
-             u, s, vT = u[:, where], s[where], vT[where]
-
-         self._a = a
-         self._urealT = np.array(u.T.real)
-         self._uimagT = np.array(u.T.imag)
-         self._s = s
-         self._v = np.array(vT.T)
-
-     def __matmul__(self, x):
-         """Matrix-matrix multiplication."""
-         x = np.asarray(x)
-         if np.issubdtype(x.dtype, np.complexfloating):
-             warn("Expecting array of real numbers in expansion", UserWarning, 2)
-         return self._a @ x
-
-     def matmul(self, x, axis=None):
-         """Compute ``A @ x`` (optionally along specified axis of x)"""
-         return _matop_along_axis(self._a.__matmul__, x, axis)
-
-     def _lstsq(self, x):
-         r = self._urealT @ x.real
-         r += self._uimagT @ x.imag
-         r = r / (self._s[:, None] if r.ndim > 1 else self._s)
-         return self._v @ r
-
-     def lstsq(self, x, axis=None):
-         """Return ``y`` such that ``np.linalg.norm(A @ y - x)`` is minimal"""
-         return _matop_along_axis(self._lstsq, x, axis)
-
-     def __array__(self, dtype=""):
-         """Convert to numpy array."""
-         return self._a if dtype == "" else self._a.astype(dtype)
-
-     @property
-     def a(self):
-         """Full matrix"""
-         return self._a
-
-     @property
-     def u(self):
-         """Split left singular vectors, aranged column-wise"""
-         return (self._urealT + 1j * self._uimagT).T
-
-     @property
-     def s(self):
-         """Most significant, nonzero singular values"""
-         return self._s
-
-     @property
-     def vH(self):
-         """Right singular vectors, transposed"""
-         return self._v.conj().T
+     def fit(self, ax, axis=0):
+         """
+         Fit basis coefficients from Matsubara frequency values.
+         """
+         ax = np.ascontiguousarray(ax)
+         ndim = len(ax.shape)
+         input_dims = np.asarray(ax.shape, dtype=np.int32)
+         output_dims = list(ax.shape)
+         output_dims[axis] = self.basis.size
+         output = np.zeros(output_dims, dtype=c_double_complex)
+
+         status = _lib.spir_sampling_fit_zz(
+             self._ptr,
+             SPIR_ORDER_ROW_MAJOR,
+             ndim,
+             input_dims.ctypes.data_as(POINTER(c_int)),
+             axis,
+             ax.ctypes.data_as(POINTER(c_double_complex)),
+             output.ctypes.data_as(POINTER(c_double_complex))
+         )
+         if status != COMPUTATION_SUCCESS:
+             raise RuntimeError(f"Failed to fit sampling: {status}")
+         return output['real']

      @property
      def cond(self):
-         """Condition number of matrix"""
-         return self._s[0] / self._s[-1]
-
-
- class ConditioningWarning(RuntimeWarning):
-     """Warns about a poorly conditioned problem.
-
-     This warning is issued if the library detects a poorly conditioned fitting
-     problem. This essentially means there is a high degree of ambiguity in how
-     to choose the solution. One must therefore expect to lose significant
-     precision in the parameter values.
-     """
-     pass
-
-
- def _matop_along_axis(op, x, axis=None):
-     if axis is None:
-         return op(x)
-
-     x = np.asarray(x)
-     target_axis = max(x.ndim - 2, 0)
-     x = np.moveaxis(x, axis, target_axis)
-     r = op(x)
-     return np.moveaxis(r, target_axis, axis)
-
-
- def _split_complex(mat, has_zero=False, svd_algo=np.linalg.svd):
-     mat = np.asarray(mat)
-     n, _l = mat.shape
-     if not np.issubdtype(mat.dtype, np.complexfloating):
-         raise ValueError("mat must be complex matrix")
-
-     # split real and imaginary part into separate matrices
-     offset_imag = 1 if has_zero else 0
-     rmat = np.vstack((mat.real, mat[offset_imag:].imag))
-
-     # perform real-valued SVD
-     ur, s, vT = svd_algo(rmat, full_matrices=False)
-
-     # undo the split of the resulting vT vector
-     u = np.empty((n, s.size), mat.dtype)
-     u.real = ur[:n]
-     u[:offset_imag].imag = 0
-     u[offset_imag:].imag = ur[n:]
-     return u, s, vT
+         """Condition number of the sampling matrix."""
+         cond = c_double()
+         status = _lib.spir_sampling_get_cond_num(self._ptr, byref(cond))
+         if status != COMPUTATION_SUCCESS:
+             raise RuntimeError(f"Failed to get condition number: {status}")
+         return cond.value
+
+     def __repr__(self):
+         return f"MatsubaraSampling(n_points={len(self.sampling_points)}, positive_only={self.positive_only})"