sparse-ir 1.1.6__py3-none-any.whl → 2.0.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sparse_ir/sve.py CHANGED
@@ -1,18 +1,58 @@
- # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
- # SPDX-License-Identifier: MIT
- from warnings import warn
- import numpy as np
+ """
+ SVE (Singular Value Expansion) functionality for SparseIR.

- from . import _gauss
- from . import poly
- from . import svd
- from . import kernel
+ This module provides Python wrappers for SVE computation and results.
+ """

- HAVE_XPREC = svd._ddouble is not None
+ import ctypes
+ from ctypes import c_int, byref

+ from pylibsparseir.core import _lib, sve_result_new, sve_result_get_svals, sve_result_get_size
+ from pylibsparseir.constants import COMPUTATION_SUCCESS, SPIR_ORDER_ROW_MAJOR
+ from .abstract import AbstractKernel
+ from .kernel import LogisticKernel, RegularizedBoseKernel

- def compute(K, eps=None, cutoff=None, n_sv=None, n_gauss=None, dtype=float,
-             work_dtype=None, sve_strat=None, svd_strat=None):
+ class SVEResult:
+     """
+     Result of a singular value expansion (SVE).
+
+     Contains the singular values and basis functions resulting from
+     the SVE of an integral kernel.
+     """
+
+     def __init__(self, kernel: AbstractKernel, epsilon: float):
+         """
+         Compute SVE of the given kernel.
+
+         Parameters
+         ----------
+         kernel : LogisticKernel or RegularizedBoseKernel
+             Kernel to compute SVE for
+         epsilon : float
+             Desired accuracy of the expansion
+         """
+         if not isinstance(kernel, (LogisticKernel, RegularizedBoseKernel)):
+             raise TypeError("kernel must be LogisticKernel or RegularizedBoseKernel")
+
+         self._kernel = kernel  # Store kernel for later use
+         self._epsilon = epsilon
+
+         self._ptr = sve_result_new(kernel._ptr, epsilon)
+
+     def __len__(self):
+         return sve_result_get_size(self._ptr)
+
+     @property
+     def s(self):
+         return sve_result_get_svals(self._ptr)
+
+     def __del__(self):
+         """Clean up SVE resources."""
+         if hasattr(self, '_ptr') and self._ptr:
+             _lib.spir_sve_result_release(self._ptr)
+
+
+ def compute(kernel, epsilon):
      """Perform truncated singular value expansion of a kernel.

      Perform a truncated singular value expansion (SVE) of an integral
@@ -30,356 +70,14 @@ def compute(K, eps=None, cutoff=None, n_sv=None, n_gauss=None, dtype=float,
      using a collocation).

      Arguments:
-         K (kernel.AbstractKernel):
+         kernel (kernel.AbstractKernel):
              Integral kernel to take SVE from
-         eps (float):
+         epsilon (float):
              Accuracy target for the basis: attempt to have singular values down
-             to a relative magnitude of ``eps``, and have each singular value
-             and singular vector be accurate to ``eps``. A ``work_dtype`` with
-             a machine epsilon of ``eps**2`` or lower is required to satisfy
-             this. Defaults to ``2.2e-16`` if xprec is available, and ``1e-8``
-             otherwise.
-         cutoff (float):
-             Relative cutoff for the singular values. A ``work_dtype`` with
-             machine epsilon of ``cutoff`` is required to satisfy this.
-             Defaults to a small multiple of the machine epsilon.
-
-             Note that ``cutoff`` and ``eps`` serve distinct purposes. ``cutoff``
-             reprsents the accuracy to which the kernel is reproduced, whereas
-             ``eps`` is the accuracy to which the singular values and vectors
-             are guaranteed.
-         n_sv (int):
-             Maximum basis size. If given, only at most the ``n_sv`` most
-             significant singular values and associated singular functions are
-             returned.
-         n_gauss (int):
-             Order of Legendre polynomials. Defaults to kernel hinted value.
-         dtype (np.dtype):
-             Data type of the result.
-         work_dtype (np.dtype):
-             Working data type. Defaults to a data type with machine epsilon of
-             at most ``eps**2`` and at most ``cutoff``, or otherwise most
-             accurate data type available.
-         sve_strat (AbstractSVE):
-             SVE to SVD translation strategy. Defaults to ``SamplingSVE``,
-             optionally wrapped inside of a ``CentrosymmSVE`` if the kernel
-             is centrosymmetric.
-         svd_strat ('fast' or 'default' or 'accurate'):
-             SVD solver. Defaults to fast (ID/RRQR) based solution
-             when accuracy goals are moderate, and more accurate Jacobi-based
-             algorithm otherwise.
+             to a relative magnitude of ``epsilon``, and have each singular value
+             and singular vector be accurate to ``epsilon``.

      Returns:
          An ``SVEResult`` containing the truncated singular value expansion.
      """
-     safe_eps, work_dtype, svd_strat = _safe_eps(eps, work_dtype, svd_strat)
-     if sve_strat is None:
-         sve_strat = CentrosymmSVE if K.is_centrosymmetric else SamplingSVE
-     if cutoff is None:
-         cutoff = 2 * svd.finfo(work_dtype).eps
-     sve = sve_strat(K, safe_eps, n_gauss=n_gauss, dtype=work_dtype)
-     u, s, v = zip(*(svd.compute(matrix, sve.nsvals_hint, svd_strat)
-                     for matrix in sve.matrices))
-     u, s, v = _truncate(u, s, v, cutoff, n_sv)
-     return sve.postprocess(u, s, v, dtype)
-
-
- class AbstractSVE:
-     """Truncated singular value expansion (SVE) of an integral kernel.
-
-     Given an integral kernel `K`, this provides methods for computing its
-     truncated singular value expansion (SVE), given by::
-
-         K(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in range(L)),
-
-     where `L` is the truncation, `u[l](x)` is the `l`-th left singular
-     function, `s[l]` is the `l`-th singular value, and `v[l](y)` is the `l`-th
-     right singular function. The left and right singular functions form
-     orthonormal systems on their respective spaces.
-
-     Computing the SVE involves introducing two sets of basis functions on the
-     `x` and `y` axis and then translating the SVE into one or more matrices,
-     the computing the singular value decomposition of those matrices, and
-     finally postprocessing the data.
-     """
-     @property
-     def matrices(self):
-         """SVD problems underlying the SVE."""
-         raise NotImplementedError()
-
-     def postprocess(self, u, s, v, dtype=None):
-         """Constructs the SVE result from the SVD"""
-         raise NotImplementedError()
-
-
- class SVEResult:
-     """Result of singular value expansion"""
-     def __init__(self, u, s, v, K, eps=None):
-         self.u = u
-         self.s = s
-         self.v = v
-
-         # In addition to its SVE, we remember the type of kernel and also the
-         # accuracy to which the SVE was computed.
-         self.K = K
-         self.eps = eps
-
-     def part(self, eps=None, max_size=None):
-         if eps is None:
-             eps = self.eps
-         cut = (self.s >= eps * self.s[0]).sum()
-         if max_size is not None and max_size < cut:
-             cut = max_size
-         if cut == self.s.size:
-             return self.u, self.s, self.v
-         else:
-             return self.u[:cut], self.s[:cut], self.v[:cut]
-
-     def __iter__(self):
-         return iter((self.u, self.s, self.v))
-
-
- class SamplingSVE(AbstractSVE):
-     """SVE to SVD translation by sampling technique [1].
-
-     Maps the singular value expansion (SVE) of a kernel ``K`` onto the singular
-     value decomposition of a matrix ``A``. This is achieved by chosing two
-     sets of Gauss quadrature rules: ``(x, wx)`` and ``(y, wy)`` and
-     approximating the integrals in the SVE equations by finite sums. This
-     implies that the singular values of the SVE are well-approximated by the
-     singular values of the following matrix::
-
-         A[i, j] = sqrt(wx[i]) * K(x[i], y[j]) * sqrt(wy[j])
-
-     and the values of the singular functions at the Gauss sampling points can
-     be reconstructed from the singular vectors ``u`` and ``v`` as follows::
-
-         u[l,i] ≈ sqrt(wx[i]) u[l](x[i])
-         v[l,j] ≈ sqrt(wy[j]) u[l](y[j])
-
-     [1] P. Hansen, Discrete Inverse Problems, Ch. 3.1
-     """
-     def __init__(self, K, eps, *, n_gauss=None, dtype=float):
-         self.K = K
-         sve_hints = K.sve_hints(eps)
-         if n_gauss is None:
-             n_gauss = sve_hints.ngauss
-
-         self.eps = eps
-         self.n_gauss = n_gauss
-         self.nsvals_hint = sve_hints.nsvals
-         self._rule = _gauss.legendre(n_gauss, dtype)
-         self._segs_x = sve_hints.segments_x.astype(dtype)
-         self._segs_y = sve_hints.segments_y.astype(dtype)
-         self._gauss_x = self._rule.piecewise(self._segs_x)
-         self._gauss_y = self._rule.piecewise(self._segs_y)
-         self._sqrtw_x = np.sqrt(self._gauss_x.w)
-         self._sqrtw_y = np.sqrt(self._gauss_y.w)
-
-     @property
-     def matrices(self):
-         result = kernel.matrix_from_gauss(self.K, self._gauss_x, self._gauss_y)
-         result *= self._sqrtw_x[:, None]
-         result *= self._sqrtw_y[None, :]
-         return result,
-
-     def postprocess(self, u, s, v, dtype=None):
-         u, = u
-         s, = s
-         v, = v
-         if dtype is None:
-             dtype = np.result_type(u, s, v)
-
-         s = s.astype(dtype)
-         u_x = u / self._sqrtw_x[:,None]
-         v_y = v / self._sqrtw_y[:,None]
-
-         u_x = u_x.reshape(self._segs_x.size - 1, self.n_gauss, s.size)
-         v_y = v_y.reshape(self._segs_y.size - 1, self.n_gauss, s.size)
-
-         cmat = _gauss.legendre_collocation(self._rule)
-         # lx,ixs -> ils -> lis
-         u_data = (cmat @ u_x).transpose(1, 0, 2)
-         v_data = (cmat @ v_y).transpose(1, 0, 2)
-
-         dsegs_x = self._segs_x[1:] - self._segs_x[:-1]
-         dsegs_y = self._segs_y[1:] - self._segs_y[:-1]
-         u_data *= np.sqrt(.5 * dsegs_x)[None,:,None]
-         v_data *= np.sqrt(.5 * dsegs_y)[None,:,None]
-
-         # Construct polynomial
-         ulx = poly.PiecewiseLegendrePoly(
-             u_data.astype(dtype), self._segs_x.astype(dtype))
-         vly = poly.PiecewiseLegendrePoly(
-             v_data.astype(dtype), self._segs_y.astype(dtype))
-         _canonicalize(ulx, vly)
-         return SVEResult(ulx, s, vly, self.K, self.eps)
-
-
- class CentrosymmSVE(AbstractSVE):
-     """SVE of centrosymmetric kernel in block-diagonal (even/odd) basis.
-
-     For a centrosymmetric kernel ``K``, i.e., a kernel satisfying:
-     ``K(x, y) == K(-x, -y)``, one can make the following ansatz for the
-     singular functions::
-
-         u[l](x) = ured[l](x) + sign[l] * ured[l](-x)
-         v[l](y) = vred[l](y) + sign[l] * ured[l](-y)
-
-     where ``sign[l]`` is either +1 or -1. This means that the singular value
-     expansion can be block-diagonalized into an even and an odd part by
-     (anti-)symmetrizing the kernel::
-
-         Keven = K(x, y) + K(x, -y)
-         Kodd = K(x, y) - K(x, -y)
-
-     The l-th basis function, restricted to the positive interval, is then
-     the singular function of one of these kernels. If the kernel generates a
-     Chebyshev system [1], then even and odd basis functions alternate.
-
-     [1]: A. Karlin, Total Positivity (1968).
-     """
-     def __init__(self, K, eps, *, InnerSVE=None, **inner_args):
-         if InnerSVE is None:
-             InnerSVE = SamplingSVE
-         self.K = K
-         self.eps = eps
-
-         # Inner kernels for even and odd functions
-         self.even = InnerSVE(K.get_symmetrized(+1), eps, **inner_args)
-         self.odd = InnerSVE(K.get_symmetrized(-1), eps, **inner_args)
-
-         # Now extract the hints
-         self.nsvals_hint = max(self.even.nsvals_hint, self.odd.nsvals_hint)
-
-     @property
-     def matrices(self):
-         m, = self.even.matrices
-         yield m
-         m, = self.odd.matrices
-         yield m
-
-     def postprocess(self, u, s, v, dtype):
-         u_even, s_even, v_even = self.even.postprocess(u[:1], s[:1], v[:1], dtype)
-         u_odd, s_odd, v_odd = self.odd.postprocess(u[1:], s[1:], v[1:], dtype)
-
-         # Merge two sets - data is [legendre, segment, l]
-         u_data = np.concatenate([u_even.data, u_odd.data], axis=2)
-         v_data = np.concatenate([v_even.data, v_odd.data], axis=2)
-         s = np.concatenate([s_even, s_odd])
-         signs = np.concatenate([np.ones(s_even.size), -np.ones(s_odd.size)])
-
-         # Sort: now for totally positive kernels like defined in this module,
-         # this strictly speaking is not necessary as we know that the even/odd
-         # functions intersperse.
-         sort = s.argsort()[::-1]
-         u_data = u_data[:, :, sort]
-         v_data = v_data[:, :, sort]
-         s = s[sort]
-         signs = signs[sort]
-
-         # Extend to the negative side
-         inv_sqrt2 = 1/np.sqrt(np.array(2, dtype=u_data.dtype))
-         u_data *= inv_sqrt2
-         v_data *= inv_sqrt2
-         poly_flip_x = ((-1)**np.arange(u_data.shape[0]))[:, None, None]
-         u_neg = u_data[:, ::-1, :] * poly_flip_x * signs
-         v_neg = v_data[:, ::-1, :] * poly_flip_x * signs
-         u_data = np.concatenate([u_neg, u_data], axis=1)
-         v_data = np.concatenate([v_neg, v_data], axis=1)
-
-         # TODO: this relies on specific symmetrization behaviour ...
-         full_hints = self.K.sve_hints(self.eps)
-         u = poly.PiecewiseLegendrePoly(u_data, full_hints.segments_x, symm=signs)
-         v = poly.PiecewiseLegendrePoly(v_data, full_hints.segments_y, symm=signs)
-         return SVEResult(u, s, v, self.K, self.eps)
-
-
- def _safe_eps(eps_required, work_dtype, svd_strat):
-     # First, choose the working dtype based on the eps required
-     if work_dtype is None:
-         if eps_required is None or eps_required < 1e-8:
-             work_dtype = svd.MAX_DTYPE
-         else:
-             work_dtype = np.float64
-     else:
-         work_dtype = np.dtype(work_dtype)
-
-     # Next, work out the actual epsilon
-     if work_dtype == np.float64:
-         # This is technically a bit too low (the true value is about 1.5e-8),
-         # but it's not too far off and easier to remember for the user.
-         safe_eps = 1e-8
-     else:
-         safe_eps = np.sqrt(svd.finfo(work_dtype).eps)
-
-     # Work out the SVD strategy to be used. If the user sets this, we
-     # assume they know what they are doing and do not warn if they compute
-     # the basis.
-     warn_acc = False
-     if svd_strat is None:
-         if eps_required is not None and eps_required < safe_eps:
-             svd_strat = 'accurate'
-             warn_acc = True
-         else:
-             svd_strat = 'fast'
-
-     if warn_acc:
-         msg = (f"\n"
-                f"Requested accuracy is {eps_required:.2g}, which is below the\n"
-                f"accuracy {safe_eps:.2g} for the work data type {work_dtype}.\n"
-                f"Expect singular values and basis functions for large l to\n"
-                f"have lower precision than the cutoff.\n")
-         if not HAVE_XPREC:
-             msg += "Install the xprec package to gain more precision.\n"
-         warn(msg, UserWarning, 3)
-
-     return safe_eps, work_dtype, svd_strat
-
-
- def _canonicalize(ulx, vly):
-     """Canonicalize basis.
-
-     Each SVD (u_l, v_l) pair is unique only up to a global phase, which may
-     differ from implementation to implementation and also platform. We
-     fix that gauge by demanding u_l(1) > 0. This ensures a diffeomorphic
-     connection to the Legendre polynomials for lambda_ -> 0.
-     """
-     gauge = np.sign(ulx(1))
-     ulx.data[None, None, :] *= 1/gauge
-     vly.data[None, None, :] *= gauge
-
-
- def _truncate(u, s, v, rtol=0, lmax=None):
-     """Truncate singular value expansion.
-
-     Arguments:
-
-      - ``u``, ``s``, ``v``: Thin singular value expansion
-      - ``rtol`` : If given, only singular values satisfying
-        ``s[l]/s[0] > rtol`` are retained.
-      - ``lmax`` : If given, at most the ``lmax`` most significant singular
-        values are retained.
-     """
-     if lmax is not None and (lmax < 0 or int(lmax) != lmax):
-         raise ValueError("invalid value of maximum number of singular values")
-     if not (0 <= rtol <= 1):
-         raise ValueError("invalid relative tolerance")
-
-     sall = np.hstack(s)
-
-     # Determine singular value cutoff. Note that by selecting a cutoff even
-     # in the case of lmax, we make sure to never remove parts of a degenerate
-     # singular value space, rather, we reduce the size of the basis.
-     ssort = np.sort(sall)
-     cutoff = rtol * ssort[-1]
-     if lmax is not None and lmax < sall.size:
-         cutoff = max(cutoff, ssort[sall.size - lmax - 1])
-
-     # Determine how many singular values survive in each group
-     scount = [(si > cutoff).sum() for si in s]
-
-     u_cut = [ui[:, :counti] for (ui, counti) in zip(u, scount)]
-     s_cut = [si[:counti] for (si, counti) in zip(s, scount)]
-     v_cut = [vi[:, :counti] for (vi, counti) in zip(v, scount)]
-     return u_cut, s_cut, v_cut
+     return SVEResult(kernel, epsilon)
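
Taken together, the sve.py changes replace the pure-Python SVE pipeline (SamplingSVE, CentrosymmSVE, _safe_eps, _truncate) with a thin wrapper around the libsparseir C library: compute(kernel, epsilon) now simply constructs an SVEResult, which holds an opaque pointer obtained from sve_result_new and releases it in __del__ via spir_sve_result_release. The sketch below illustrates how a 1.1.6 call site might migrate; it is not taken from the package documentation, and it assumes that LogisticKernel is still constructed from the cutoff Λ alone, as in 1.1.6, with the numeric values purely illustrative.

```python
# Hypothetical migration sketch, based only on the signatures visible in this diff.
from sparse_ir.kernel import LogisticKernel  # import path as used in the new sve.py
from sparse_ir import sve

K = LogisticKernel(80.0)   # assumption: cutoff Λ as the sole constructor argument

# sparse-ir 1.1.6: many tuning knobs, result unpacked into (u, s, v)
#   u, s, v = sve.compute(K, eps=1e-8, n_sv=30, svd_strat='accurate')

# sparse-ir 2.0.0a2: only the kernel and the accuracy target remain
res = sve.compute(K, 1e-8)   # equivalent to SVEResult(K, 1e-8)
print(len(res))              # basis size, via sve_result_get_size
print(res.s)                 # singular values, via sve_result_get_svals
```

Note that the 2.0.0a2 SVEResult shown in this diff no longer exposes the u and v singular functions, the part() helper, or tuple unpacking, so downstream code that relied on the old result object needs to be adapted.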
sparse_ir-2.0.0a2.dist-info/METADATA ADDED
@@ -0,0 +1,23 @@
+ Metadata-Version: 2.4
+ Name: sparse-ir
+ Version: 2.0.0a2
+ Summary: Python bindings for the libsparseir library, providing efficient sparse intermediate representation for many-body physics calculations
+ Author: SpM-lab
+ License-Expression: MIT
+ Project-URL: Homepage, https://github.com/SpM-lab/sparse-r
+ Project-URL: Repository, https://github.com/SpM-lab/sparse-r
+ Project-URL: Bug Tracker, https://github.com/SpM-lab/sparse-r/issues
+ Keywords: physics,many-body,green-functions,sparse-ir
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Physics
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE.txt
+ Requires-Dist: numpy
+ Requires-Dist: scipy
+ Requires-Dist: pylibsparseir<0.2.0,>=0.1.0
+ Requires-Dist: sphinx-rtd-theme>=3.0.2
+ Dynamic: license-file
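
The new METADATA reflects the same shift at the packaging level: the numerical core now comes from the pylibsparseir dependency (pinned to >=0.1.0,<0.2.0) alongside numpy and scipy, Python 3.10+ is required, and sphinx-rtd-theme is also declared as a runtime requirement. If the 2.0.0a2 wheel is installed, these declarations can be read back with the standard library alone; the snippet below is only an illustrative check.

```python
# Read the installed distribution's declared metadata using only the standard library.
# The distribution name "sparse-ir" is taken from the METADATA block above.
from importlib.metadata import metadata, requires

meta = metadata("sparse-ir")
print(meta["Version"])              # expected: 2.0.0a2
print(meta["Requires-Python"])      # expected: >=3.10
for req in requires("sparse-ir"):   # numpy, scipy, pylibsparseir<0.2.0,>=0.1.0, ...
    print(req)
```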
sparse_ir-2.0.0a2.dist-info/RECORD ADDED
@@ -0,0 +1,16 @@
+ sparse_ir/__init__.py,sha256=9Nm4eII2S027rAVH0KRoamwoXRRGOP04YbJeO96dArc,1190
+ sparse_ir/_util.py,sha256=93s7tGcXxz3mwHsBQmAh-6EQ26xLbUeFHVtlnzQxRps,3004
+ sparse_ir/abstract.py,sha256=6W8x8f0eBoc3s3EZ83fZzo2Ygoc9NZiBfnN2af2DW_M,5590
+ sparse_ir/augment.py,sha256=RqeH_l_INn3i7_r1PRP3Ef6gZnR0QqmdHujv-U9hTfI,10514
+ sparse_ir/basis.py,sha256=A-ggn878zlkrvDijK14V2mkSLwqhLp3O-cnFF5bvOIk,8519
+ sparse_ir/basis_set.py,sha256=xP7VCzP2a9kuK7X-9OCUWYL-ikbylD4gMJF1DfqQLlw,4725
+ sparse_ir/dlr.py,sha256=XHUG_cmRWC3kDHruozQ-3X4Md44TpUXX1YeVVw-e27M,7107
+ sparse_ir/kernel.py,sha256=4MjdDwWB8jK8Mrtj08W52rCO2VEVjBl7lZIa2lib3rA,3129
+ sparse_ir/poly.py,sha256=-KPZN2PaKsPz0CjibdFsey2dhe4GOy9SUy5ho14fY2E,9038
+ sparse_ir/sampling.py,sha256=8bRtkytmx2jsCKNv0XtV97_2JX0TBwUB-tckutsthnw,11176
+ sparse_ir/sve.py,sha256=Usv5xZM6EOce9sIKvVICiZ9guS44pWk7TChmU30QjH4,2862
+ sparse_ir-2.0.0a2.dist-info/licenses/LICENSE.txt,sha256=3tGlA0QNYsfjETaQqJO0Ixne5PQ16PNVJiDcGsgHER0,1079
+ sparse_ir-2.0.0a2.dist-info/METADATA,sha256=8MRdXwEHMqq0jeI3h96ERFzA82dbWNIvSkwWBsQRg6c,966
+ sparse_ir-2.0.0a2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ sparse_ir-2.0.0a2.dist-info/top_level.txt,sha256=UsscWAzJg7fKo9qmIwW8jnG7CAfhFzWYBOTXVySzuA0,10
+ sparse_ir-2.0.0a2.dist-info/RECORD,,
sparse_ir-2.0.0a2.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.42.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any