sparse-ir 1.1.7__py3-none-any.whl → 2.0.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sparse_ir/_gauss.py DELETED
@@ -1,260 +0,0 @@
- # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
- # SPDX-License-Identifier: MIT
- import warnings
- import numpy as np
-
- import scipy.linalg as sp_linalg
- import numpy.polynomial.legendre as np_legendre
-
-
- class Rule:
-     """Quadrature rule.
-
-     Approximation of an integral by a weighted sum over discrete points:
-
-         ∫ f(x) * omega(x) * dx ~ sum(f(xi) * wi for (xi, wi) in zip(x, w))
-
-     where we generally have superexponential convergence for smooth ``f(x)``
-     with the number of quadrature points.
-     """
-     def __init__(self, x, w, x_forward=None, x_backward=None, a=-1, b=1):
-         x = np.asarray(x)
-         if x_forward is None:
-             x_forward = x - a
-         if x_backward is None:
-             x_backward = b - x
-
-         self.x = x
-         self.w = np.asarray(w)
-         self.x_forward = np.asarray(x_forward)
-         self.x_backward = np.asarray(x_backward)
-         self.a = a
-         self.b = b
-
-     def reseat(self, a, b):
-         """Reseat current quadrature rule to new domain"""
-         scaling = (b - a) / (self.b - self.a)
-         x = scaling * (self.x - (self.b + self.a)/2) + (b + a)/2
-         w = self.w * scaling
-         x_forward = self.x_forward * scaling
-         x_backward = self.x_backward * scaling
-         return Rule(x, w, x_forward, x_backward, a, b)
-
-     def scale(self, factor):
-         """Scale weights by factor"""
-         return Rule(self.x, self.w * factor, self.x_forward, self.x_backward,
-                     self.a, self.b)
-
-     def piecewise(self, edges):
-         """Piecewise quadrature with the same quadrature rule, but scaled"""
-         edges = np.asarray(edges)
-         start = edges[:-1]
-         stop = edges[1:]
-         if not (stop > start).all():
-             raise ValueError("segment ends must be in ascending order")
-
-         return self.join(*(self.reseat(start_i, stop_i)
-                            for (start_i, stop_i) in zip(start, stop)))
-
-     def astype(self, dtype):
-         dtype = np.dtype(dtype)
-         return Rule(self.x.astype(dtype), self.w.astype(dtype),
-                     self.x_forward.astype(dtype), self.x_backward.astype(dtype),
-                     dtype.type(self.a), dtype.type(self.b))
-
-     @staticmethod
-     def join(*gauss_list):
-         """Join multiple Gauss quadratures together"""
-         if not gauss_list:
-             return Rule((), ())
-
-         a = gauss_list[0].a
-         b = gauss_list[-1].b
-         prev_b = a
-         parts = []
-
-         for curr in gauss_list:
-             if curr.a != prev_b:
-                 raise ValueError("Gauss rules must be ascending")
-             prev_b = curr.b
-             x_forward = curr.x_forward + (curr.a - a)
-             x_backward = curr.x_backward + (b - curr.b)
-             parts.append((curr.x, curr.w, x_forward, x_backward))
-
-         x, w, x_forward, x_backward = map(np.hstack, zip(*parts))
-         return Rule(x, w, x_forward, x_backward, a, b)
-
-
- def legendre(n, dtype=float):
-     """Gauss-Legendre quadrature"""
-     return rule_from_recurrence(*_legendre_recurrence(n, dtype))
-
-
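For reference, here is how the deleted API composed: `legendre(n)` builds the n-point Gauss-Legendre rule on [-1, 1], and `Rule.piecewise` reseats copies of it onto adjacent panels. A minimal usage sketch against sparse-ir 1.x (`sparse_ir._gauss` is a private module, so the import is illustrative only):

```python
# Sketch (sparse-ir 1.x): integrate exp(x) over [0, 2] with a two-panel
# Gauss-Legendre rule from the deleted private module.
import numpy as np
from sparse_ir import _gauss

rule = _gauss.legendre(16)              # 16-point rule on [-1, 1]
rule = rule.piecewise([0.0, 1.0, 2.0])  # two panels covering [0, 2]
approx = np.sum(rule.w * np.exp(rule.x))
print(abs(approx - (np.e**2 - 1)))      # error near machine precision
```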
- def legendre_collocation(rule, n=None):
-     """Generate collocation matrix from Gauss-Legendre rule"""
-     if n is None:
-         n = rule.x.size
-
-     res = np_legendre.legvander(rule.x, n - 1).T.copy()
-     res *= rule.w
-
-     invnorm = np.arange(0.5, n + 0.5, dtype=rule.x.dtype)
-     res *= invnorm[:, None]
-     return res
-
-
- def rule_from_recurrence(alpha, beta, a, b):
-     """Make new Gauss scheme based on recurrence coefficients.
-
-     Given a set of polynomials ``P[n]`` defined by the following three-term
-     recurrence relation::
-
-         P[0](x) == 1
-         P[1](x) == x - alpha[0]
-         P[n+1](x) == (x - alpha[n]) * P[n](x) - beta[n] * P[n-1](x)
-
-     we construct both a set of quadrature points ``x`` and weights ``w`` for
-     Gaussian quadrature. It is usually a good idea to work in extended
-     precision for extra accuracy in the quadrature rule.
-     """
-     dtype = np.result_type(alpha, beta)
-
-     # First approximation of roots by finding eigenvalues of the tridiagonal
-     # system corresponding to the recursion
-     beta[0] = b - a
-     beta_is_pos = beta >= 0
-     if not beta_is_pos.all():
-         raise NotImplementedError("scipy solver cannot handle complex")
-
-     sqrt_beta = np.sqrt(beta[1:])
-     x = sp_linalg.eigvalsh_tridiagonal(alpha, sqrt_beta)
-     x = x.astype(dtype)
-
-     # These roots are usually only accurate to 100 ulps or so, so we improve
-     # on them using a few iterations of the Newton method.
-     prevdiff = 1.0
-     maxiter = 5
-     for _ in range(maxiter):
-         p, dp, _, _ = _polyvalderiv(x, alpha, beta)
-         diff = p / dp
-         x -= diff
-
-         # check convergence without relying on ATOL
-         currdiff = np.abs(diff).max()
-         if not (2 * currdiff <= prevdiff):
-             break
-         prevdiff = currdiff
-     else:
-         warnings.warn("Newton iteration did not converge, error = {:.2g}"
-                       .format(currdiff))
-
-     # Now we know that the weights are proportional to the following:
-     _, dp1, p0, _ = _polyvalderiv(x, alpha, beta)
-     with np.errstate(over='ignore'):
-         w = 1 / (dp1 * p0)
-     w *= beta[0] / w.sum(initial=dtype.type(0))
-     return Rule(x, w, x - a, b - x, a, b)
-
-
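As a sanity check, `rule_from_recurrence` fed with the Legendre coefficients should reproduce the reference nodes and weights from numpy. A sketch using the sparse-ir 1.x internals above:

```python
# Sketch (sparse-ir 1.x): recurrence-based rule vs. numpy's Gauss-Legendre.
import numpy as np
from sparse_ir import _gauss

rule = _gauss.rule_from_recurrence(*_gauss._legendre_recurrence(8))
x_ref, w_ref = np.polynomial.legendre.leggauss(8)
print(np.allclose(rule.x, x_ref), np.allclose(rule.w, w_ref))  # True True
```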
160
- def _polyvalderiv(x, alpha, beta):
161
- """Return value and derivative of polynomial.
162
-
163
- Given a set of polynomials ``P[n]`` defined by a three-term recurrence,
164
- we evaluate both value and derviative for the highest polynomial and
165
- the second highest one.
166
- """
167
- n = len(alpha)
168
- p0 = np.ones_like(x)
169
- p1 = x - alpha[0] * p0
170
- dp0 = np.zeros_like(x)
171
- dp1 = p0
172
- for k in range(1, n):
173
- x_minus_alpha = x - alpha[k]
174
- p2 = x_minus_alpha * p1 - beta[k] * p0
175
- dp2 = p1 + x_minus_alpha * dp1 - beta[k] * dp0
176
- p0 = p1
177
- p1 = p2
178
- dp0 = dp1
179
- dp1 = dp2
180
-
181
- return p1, dp1, p0, dp0
182
-
183
-
184
- def _legendre_recurrence(n, dtype=float):
185
- """Returns the alpha, beta for Gauss-Legendre integration"""
186
- # The Legendre polynomials are defined by the following recurrence:
187
- #
188
- # (n + 1) * P[n+1](x) == (2 * n + 1) * x * P[n](x) - n * P[n-1](x)
189
- #
190
- # To normalize this, we realize that the prefactor of the highest power
191
- # of P[n] is (2n -1)!! / n!, which we divide by to obtain the "scaled"
192
- # beta values.
193
- dtype = np.dtype(dtype)
194
- k = np.arange(n, dtype=dtype)
195
- ksq = k**2
196
- alpha = np.zeros_like(k)
197
- beta = ksq / (4 * ksq - 1)
198
- beta[0] = 2
199
- one = dtype.type(1)
200
- return alpha, beta, -one, one
201
-
202
-
203
- class NestedRule(Rule):
204
- """Nested Gauss quadrature rule."""
205
- def __init__(self, x, w, v, x_forward=None, x_backward=None, a=-1, b=1):
206
- super().__init__(x, w, x_forward, x_backward, a, b)
207
- self.v = np.asarray(v)
208
- self.vsel = slice(1, None, 2)
209
-
210
- def reseat(self, a, b):
211
- """Reseat current quadrature rule to new domain"""
212
- res = super().reseat(a, b)
213
- new_v = (b - a) / (self.b - self.a) * self.v
214
- return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward,
215
- res.a, res.b)
216
-
217
- def scale(self, factor):
218
- """Scale weights by factor"""
219
- res = super().scale(factor)
220
- new_v = factor * self.v
221
- return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward,
222
- res.a, res.b)
223
-
224
- def astype(self, dtype):
225
- dtype = np.dtype(dtype)
226
- res = super().astype(dtype)
227
- new_v = self.v.astype(dtype)
228
- return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward,
229
- res.a, res.b)
230
-
231
-
232
- def kronrod_31_15():
233
- x = (-0.99800229869339710, -0.98799251802048540, -0.96773907567913910,
234
- -0.93727339240070600, -0.89726453234408190, -0.84820658341042720,
235
- -0.79041850144246600, -0.72441773136017010, -0.65099674129741700,
236
- -0.57097217260853880, -0.48508186364023970, -0.39415134707756340,
237
- -0.29918000715316884, -0.20119409399743451, -0.10114206691871750,
238
- +0.00000000000000000, +0.10114206691871750, +0.20119409399743451,
239
- +0.29918000715316884, +0.39415134707756340, +0.48508186364023970,
240
- +0.57097217260853880, +0.65099674129741700, +0.72441773136017010,
241
- +0.79041850144246600, +0.84820658341042720, +0.89726453234408190,
242
- +0.93727339240070600, +0.96773907567913910, +0.98799251802048540,
243
- +0.99800229869339710)
244
- w = (0.005377479872923349, 0.015007947329316122, 0.025460847326715320,
245
- 0.035346360791375850, 0.044589751324764880, 0.053481524690928090,
246
- 0.062009567800670640, 0.069854121318728260, 0.076849680757720380,
247
- 0.083080502823133020, 0.088564443056211760, 0.093126598170825320,
248
- 0.096642726983623680, 0.099173598721791960, 0.100769845523875590,
249
- 0.101330007014791540, 0.100769845523875590, 0.099173598721791960,
250
- 0.096642726983623680, 0.093126598170825320, 0.088564443056211760,
251
- 0.083080502823133020, 0.076849680757720380, 0.069854121318728260,
252
- 0.062009567800670640, 0.053481524690928090, 0.044589751324764880,
253
- 0.035346360791375850, 0.025460847326715320, 0.015007947329316122,
254
- 0.005377479872923349)
255
- v = (0.03075324199611727, 0.07036604748810812, 0.10715922046717194,
256
- 0.13957067792615432, 0.16626920581699392, 0.18616100001556220,
257
- 0.19843148532711158, 0.20257824192556130, 0.19843148532711158,
258
- 0.18616100001556220, 0.16626920581699392, 0.13957067792615432,
259
- 0.10715922046717194, 0.07036604748810812, 0.03075324199611727)
260
- return NestedRule(np.array(x), np.array(w), np.array(v))
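The embedded 15-point Gauss rule sits on every second Kronrod node (`vsel`), so a single set of integrand evaluations yields both estimates and hence an error gauge. A sketch against the sparse-ir 1.x internals:

```python
# Sketch (sparse-ir 1.x): Gauss-Kronrod error estimate from NestedRule.
import numpy as np
from sparse_ir._gauss import kronrod_31_15

rule = kronrod_31_15()
fx = np.cos(rule.x)
kronrod = np.sum(rule.w * fx)           # 31-point Kronrod estimate
gauss = np.sum(rule.v * fx[rule.vsel])  # embedded 15-point Gauss estimate
print(kronrod, abs(kronrod - gauss))    # integral of cos on [-1, 1], error gauge
```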
sparse_ir/_roots.py DELETED
@@ -1,140 +0,0 @@
- # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
- # SPDX-License-Identifier: MIT
- """
- Auxiliary module for root finding routines.
- """
- import numpy as np
-
-
- def find_all(f, xgrid, type='continuous'):
-     """Find all roots of function between gridpoints"""
-     xgrid = np.asarray(xgrid)
-     if xgrid.ndim != 1:
-         raise ValueError("grid must be a one-dimensional array")
-
-     # First, extract roots that lie directly on the grid points
-     fx = f(xgrid)
-     hit = fx == 0
-     x_hit = xgrid[hit]
-
-     # Next, find out where the sign changes (sign bit flips) and remove the
-     # previously found points from consideration (we need to remove both
-     # directions for transitions + -> - and - -> +)
-     sign_change = np.signbit(fx[:-1]) != np.signbit(fx[1:])
-     sign_change &= ~hit[:-1] & ~hit[1:]
-     if not sign_change.any():
-         return x_hit
-
-     # sign_change[i] being set means that the sign changes from xgrid[i] to
-     # xgrid[i+1]. This means a corresponds to those xgrid[i] and b to those
-     # xgrid[i+1] where sign_change[i] is set.
-     where_a = np.hstack((sign_change, False))
-     where_b = np.hstack((False, sign_change))
-     a = xgrid[where_a]
-     b = xgrid[where_b]
-     fa = fx[where_a]
-     fb = fx[where_b]
-
-     # Depending on whether we have a discrete or continuous function, do
-     # this.
-     if type == 'continuous':
-         xeps = np.finfo(xgrid.dtype).eps * np.abs(xgrid).max()
-         x_bisect = _bisect_cont(f, a, b, fa, fb, xeps)
-     elif type == 'discrete':
-         x_bisect = _bisect_discr(f, a, b, fa, fb)
-     else:
-         raise ValueError("invalid type")
-     return np.sort(np.hstack([x_hit, x_bisect]))
-
-
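A quick sketch of `find_all` on a simple sign-changing function (sparse-ir 1.x internals):

```python
# Sketch (sparse-ir 1.x): all roots of sin(x) on [0, 10].
import numpy as np
from sparse_ir._roots import find_all

xgrid = np.linspace(0.0, 10.0, 1001)
print(find_all(np.sin, xgrid))  # approximately [0, pi, 2*pi, 3*pi]
```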
- def _bisect_cont(f, a, b, fa, fb, xeps):
-     """Bisect previously bracketed roots"""
-     while True:
-         mid = 0.5 * (a + b)
-         fmid = f(mid)
-         towards_a = np.signbit(fa) != np.signbit(fmid)
-         a = np.where(towards_a, a, mid)
-         fa = np.where(towards_a, fa, fmid)
-         b = np.where(towards_a, mid, b)
-         fb = np.where(towards_a, fmid, fb)
-         found = b - a < xeps
-         if found.any():
-             break
-
-     roots = mid[found]
-     if found.all():
-         return roots
-     more = _bisect_cont(f, a[~found], b[~found], fa[~found], fb[~found], xeps)
-     return np.hstack([roots, more])
-
-
- def _bisect_discr(f, a, b, fa, fb):
-     """Bisect previously bracketed roots"""
-     while True:
-         mid = (a + b) // 2
-         found = a == mid
-         if found.any():
-             break
-
-         fmid = f(mid)
-         towards_a = np.signbit(fa) != np.signbit(fmid)
-         a = np.where(towards_a, a, mid)
-         fa = np.where(towards_a, fa, fmid)
-         b = np.where(towards_a, mid, b)
-         fb = np.where(towards_a, fmid, fb)
-
-     roots = mid[found]
-     if found.all():
-         return roots
-     more = _bisect_discr(f, a[~found], b[~found], fa[~found], fb[~found])
-     return np.hstack([roots, more])
-
-
- def discrete_extrema(f, xgrid):
-     """Find extrema of Bessel-like discrete function"""
-     fx = f(xgrid)
-     absfx = np.abs(fx)
-
-     # Forward differences: where[i] being set means that the secant changes
-     # sign at fx[i+1], i.e. the extremum lies STRICTLY between x[i] and
-     # x[i+2]
-     gx = fx[1:] - fx[:-1]
-     sgx = np.signbit(gx)
-     where = sgx[:-1] != sgx[1:]
-     where_a = np.hstack([where, False, False])
-     where_b = np.hstack([False, False, where])
-
-     a = xgrid[where_a]
-     b = xgrid[where_b]
-     absf_a = absfx[where_a]
-     absf_b = absfx[where_b]
-     res = [_bisect_discr_extremum(f, *args)
-            for args in zip(a, b, absf_a, absf_b)]
-
-     # We consider the outer points to be extrema if there is a decrease
-     # in magnitude or a sign change inwards
-     sfx = np.signbit(fx)
-     if absfx[0] > absfx[1] or sfx[0] != sfx[1]:
-         res.insert(0, xgrid[0])
-     if absfx[-1] > absfx[-2] or sfx[-1] != sfx[-2]:
-         res.append(xgrid[-1])
-
-     return np.array(res)
-
-
- def _bisect_discr_extremum(f, a, b, absf_a, absf_b):
-     """Bisect extremum of f on the set {a+1, ..., b-1}"""
-     d = b - a
-     if d <= 1:
-         return a if absf_a > absf_b else b
-     if d == 2:
-         return a + 1
-
-     m = (a + b) // 2
-     n = m + 1
-     absf_m = np.abs(f(m))
-     absf_n = np.abs(f(n))
-     if absf_m > absf_n:
-         return _bisect_discr_extremum(f, a, n, absf_a, absf_n)
-     else:
-         return _bisect_discr_extremum(f, m, b, absf_m, absf_b)
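`discrete_extrema` expects an integer grid and a function that can be evaluated both on arrays and on single grid points. A minimal sketch (sparse-ir 1.x internals):

```python
# Sketch (sparse-ir 1.x): extrema of a decaying oscillation on an integer grid.
import numpy as np
from sparse_ir._roots import discrete_extrema

def f(n):
    return np.cos(0.3 * n) / (1.0 + 0.01 * n)

print(discrete_extrema(f, np.arange(100)))  # grid points near the peaks of |f|
```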
sparse_ir/adapter.py DELETED
@@ -1,267 +0,0 @@
- # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
- # SPDX-License-Identifier: MIT
- """
- Drop-in replacement for the irbasis module.
-
- This is designed to be a drop-in replacement for ``irbasis``, where the basis
- can be computed on-the-fly for arbitrary values of Lambda. In other words,
- you should be able to replace ``irbasis`` with ``sparse_ir.adapter`` and
- everything should hopefully still work.
-
- Note however that on-the-fly computation typically has lower accuracy unless
- xprec is available. Thus, by default we only populate the basis down to
- singular values of ~1e-9 and emit a warning. You can squelch the warning by
- setting `WARN_ACCURACY` to False.
- """
- # Do not import additional public symbols into this namespace, always use
- # underscores - this module should look as much as possible like `irbasis`!
- import numpy as _np
- from warnings import warn as _warn
-
- from . import sve as _sve
- from . import poly as _poly
- from . import kernel as _kernel
-
- try:
-     import xprec as _xprec
- except ImportError:
-     ACCURACY = 1.0e-9
-     WARN_ACCURACY = True
- else:
-     ACCURACY = 1.0e-15
-     WARN_ACCURACY = False
-
-
- def load(statistics, Lambda, h5file=None):
-     if WARN_ACCURACY:
-         _warn("xprec package is not found - expect degraded accuracy!\n"
-               "To squelch this warning, set WARN_ACCURACY to False.")
-
-     kernel_type = {"F": _kernel.LogisticKernel,
-                    "B": _kernel.RegularizedBoseKernel}[statistics]
-     kernel = kernel_type(float(Lambda))
-     sve_result = _sve.compute(kernel)
-     return Basis(statistics, Lambda, sve_result)
-
-
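A sketch of the advertised drop-in use (sparse-ir 1.x; with `irbasis` the first call would read `b = irbasis.load('F', 1000.0)`):

```python
# Sketch (sparse-ir 1.x): sparse_ir.adapter as an irbasis stand-in.
import sparse_ir.adapter as adapter

b = adapter.load('F', 1000.0)    # fermionic basis, Lambda = beta * wmax
print(b.dim())                   # number of basis functions
print(b.sl(0))                   # largest singular value
print(b.ulx(0, 0.5))             # u_0(x) at x = 0.5
print(b.compute_unl([0, 1, 2]))  # transform to the first Matsubara frequencies
```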
- class Basis:
-     def __init__(self, statistics, Lambda, sve_result):
-         u, s, v = sve_result.part()
-         self._statistics = statistics
-         self._Lambda = Lambda
-         self._u = u
-         self._s = s
-         self._v = v
-
-         conv_radius = 40 * Lambda
-         even_odd = {'F': 'odd', 'B': 'even'}[statistics]
-         self._uhat = _poly.PiecewiseLegendreFT(u, even_odd, conv_radius)
-
-     @property
-     def Lambda(self):
-         """Dimensionless parameter of IR basis"""
-         return self._Lambda
-
-     @property
-     def statistics(self):
-         """Statistics, either "F" for fermions or "B" for bosons"""
-         return self._statistics
-
-     def dim(self):
-         """Return dimension of basis"""
-         return self._s.size
-
-     def sl(self, l=None):
-         """Return the singular value for the l-th basis function"""
-         return _select(self._s, l)
-
-     def ulx(self, l, x):
-         """Return value of basis function for x"""
-         return _selectvals(self._u, l, x)
-
-     def d_ulx(self, l, x, order, section=None):
-         """Return (higher-order) derivatives of u_l(x)"""
-         return _selectvals(self._u.deriv(order), l, x)
-
-     def vly(self, l, y):
-         """Return value of basis function for y"""
-         return _selectvals(self._v, l, y)
-
-     def d_vly(self, l, y, order):
-         """Return (higher-order) derivatives of v_l(y)"""
-         return _selectvals(self._v.deriv(order), l, y)
-
-     def compute_unl(self, n, whichl=None):
-         """Compute transformation matrix from IR to Matsubara frequencies"""
-         n = _np.ravel(n)
-         nn = 2 * n + self._uhat.zeta
-         return _np.squeeze(_select(self._uhat, whichl)(nn).T)
-
-     def num_sections_x(self):
-         "Number of sections of piecewise polynomial representation of u_l(x)"
-         return self._u.nsegments
-
-     @property
-     def section_edges_x(self):
-         """End points of sections for u_l(x)"""
-         return self._u.knots
-
-     def num_sections_y(self):
-         "Number of sections of piecewise polynomial representation of v_l(y)"
-         return self._v.nsegments
-
-     @property
-     def section_edges_y(self):
-         """End points of sections for v_l(y)"""
-         return self._v.knots
-
-     def sampling_points_x(self, whichl):
-         """Computes "optimal" sampling points in x space for given basis"""
-         return sampling_points_x(self, whichl)
-
-     def sampling_points_y(self, whichl):
-         """Computes "optimal" sampling points in y space for given basis"""
-         return sampling_points_y(self, whichl)
-
-     def sampling_points_matsubara(self, whichl):
-         """Computes sampling points in Matsubara domain for given basis"""
-         return sampling_points_matsubara(self, whichl)
-
-
- def _select(p, l):
-     return p if l is None else p[l]
-
-
- def _selectvals(p, l, x):
-     return p(x) if l is None else p.value(l, x)
-
-
- # CODE BELOW IS TAKEN FROM IRBASIS FOR COMPATIBILITY
- def _find_roots(ulx):
-     """Find all roots in (-1, 1) using double exponential mesh + bisection"""
-     Nx = 10000
-     eps = 1e-14
-     tvec = _np.linspace(-3, 3, Nx)  # 3 is a very safe option.
-     xvec = _np.tanh(0.5 * _np.pi * _np.sinh(tvec))
-
-     zeros = []
-     for i in range(Nx - 1):
-         if ulx(xvec[i]) * ulx(xvec[i + 1]) < 0:
-             a = xvec[i + 1]
-             b = xvec[i]
-             u_a = ulx(a)
-             while a - b > eps:
-                 half_point = 0.5 * (a + b)
-                 if ulx(half_point) * u_a > 0:
-                     a = half_point
-                 else:
-                     b = half_point
-             zeros.append(0.5 * (a + b))
-     return _np.array(zeros)
-
-
- def _start_guesses(n=1000):
-     "Construct points on a logarithmically extended linear interval"
-     x1 = _np.arange(n)
-     x2 = _np.array(_np.exp(_np.linspace(_np.log(n), _np.log(1E+8), n)), dtype=int)
-     x = _np.unique(_np.hstack((x1, x2)))
-     return x
-
-
- def _get_unl_real(basis_xy, x, l):
-     "Return the l-th basis function on the Matsubara axis as a real array"
-     unl = basis_xy.compute_unl(x, l)
-
-     # Purely real functions
-     zeta = 1 if basis_xy.statistics == 'F' else 0
-     if l % 2 == zeta:
-         assert _np.allclose(unl.imag, 0)
-         return unl.real
-     else:
-         assert _np.allclose(unl.real, 0)
-         return unl.imag
-
-
- def _sampling_points(fn):
-     "Given a discretized 1D function, return the location of the extrema"
-     fn = _np.asarray(fn)
-     fn_abs = _np.abs(fn)
-     sign_flip = fn[1:] * fn[:-1] < 0
-     sign_flip_bounds = _np.hstack((0, sign_flip.nonzero()[0] + 1, fn.size))
-     points = []
-     for segment in map(slice, sign_flip_bounds[:-1], sign_flip_bounds[1:]):
-         points.append(fn_abs[segment].argmax() + segment.start)
-     return _np.asarray(points)
-
-
- def _full_interval(sample, stat):
-     if stat == 'F':
-         return _np.hstack((-sample[::-1] - 1, sample))
-     else:
-         # If we have a bosonic basis and even order (odd maximum), we have a
-         # root at zero. We have to artificially add that zero back, otherwise
-         # the condition number will blow up.
-         if sample[0] == 0:
-             sample = sample[1:]
-         return _np.hstack((-sample[::-1], 0, sample))
-
-
- def _get_mats_sampling(basis_xy, lmax=None):
-     "Generate Matsubara sampling points from extrema of basis functions"
-     if lmax is None:
-         lmax = basis_xy.dim() - 1
-
-     x = _start_guesses()
-     y = _get_unl_real(basis_xy, x, lmax)
-     x_idx = _sampling_points(y)
-
-     sample = x[x_idx]
-     return _full_interval(sample, basis_xy.statistics)
-
-
- def sampling_points_x(b, whichl):
-     """Computes "optimal" sampling points in x space for given basis"""
-     xroots = _find_roots(b._u[whichl])
-     xroots_ex = _np.hstack((-1.0, xroots, 1.0))
-     return 0.5 * (xroots_ex[:-1] + xroots_ex[1:])
-
-
- def sampling_points_y(b, whichl):
-     """Computes "optimal" sampling points in y space for given basis"""
-     roots_positive_half = 0.5 * _find_roots(lambda y: b.vly(whichl, (y + 1)/2)) + 0.5
-     if whichl % 2 == 0:
-         roots_ex = _np.sort(
-             _np.hstack([-1, -roots_positive_half, roots_positive_half, 1]))
-     else:
-         roots_ex = _np.sort(
-             _np.hstack([-1, -roots_positive_half, 0, roots_positive_half, 1]))
-     return 0.5 * (roots_ex[:-1] + roots_ex[1:])
-
-
- def sampling_points_matsubara(b, whichl):
-     """
-     Computes "optimal" sampling points in Matsubara domain for given basis
-
-     Parameters
-     ----------
-     b :
-         basis object
-     whichl : int
-         Index of reference basis function "l"
-
-     Returns
-     -------
-     sampling_points : 1D array of int
-         sampling points in Matsubara domain
-     """
-     stat = b.statistics
-
-     assert stat in ('F', 'B', 'barB')
-
-     if whichl > b.dim() - 1:
-         raise RuntimeError("whichl too large for basis dimension")
-
-     return _get_mats_sampling(b, whichl)
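For completeness, a sketch of the compatibility sampling routines above in action (sparse-ir 1.x):

```python
# Sketch (sparse-ir 1.x): sampling points from the irbasis-compatible helpers.
import sparse_ir.adapter as adapter

b = adapter.load('F', 100.0)
lmax = b.dim() - 1
print(b.sampling_points_x(lmax))          # "optimal" points in x = 2*tau/beta - 1
print(b.sampling_points_matsubara(lmax))  # integer Matsubara indices n
```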