freealg 0.1.11__py3-none-any.whl → 0.7.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. freealg/__init__.py +8 -2
  2. freealg/__version__.py +1 -1
  3. freealg/_algebraic_form/__init__.py +12 -0
  4. freealg/_algebraic_form/_branch_points.py +288 -0
  5. freealg/_algebraic_form/_constraints.py +139 -0
  6. freealg/_algebraic_form/_continuation_algebraic.py +706 -0
  7. freealg/_algebraic_form/_decompress.py +641 -0
  8. freealg/_algebraic_form/_decompress2.py +204 -0
  9. freealg/_algebraic_form/_edge.py +330 -0
  10. freealg/_algebraic_form/_homotopy.py +323 -0
  11. freealg/_algebraic_form/_moments.py +448 -0
  12. freealg/_algebraic_form/_sheets_util.py +145 -0
  13. freealg/_algebraic_form/_support.py +309 -0
  14. freealg/_algebraic_form/algebraic_form.py +1232 -0
  15. freealg/_free_form/__init__.py +16 -0
  16. freealg/{_chebyshev.py → _free_form/_chebyshev.py} +75 -43
  17. freealg/_free_form/_decompress.py +993 -0
  18. freealg/_free_form/_density_util.py +243 -0
  19. freealg/_free_form/_jacobi.py +359 -0
  20. freealg/_free_form/_linalg.py +508 -0
  21. freealg/{_pade.py → _free_form/_pade.py} +42 -208
  22. freealg/{_plot_util.py → _free_form/_plot_util.py} +37 -22
  23. freealg/{_sample.py → _free_form/_sample.py} +58 -22
  24. freealg/_free_form/_series.py +454 -0
  25. freealg/_free_form/_support.py +214 -0
  26. freealg/_free_form/free_form.py +1362 -0
  27. freealg/_geometric_form/__init__.py +13 -0
  28. freealg/_geometric_form/_continuation_genus0.py +175 -0
  29. freealg/_geometric_form/_continuation_genus1.py +275 -0
  30. freealg/_geometric_form/_elliptic_functions.py +174 -0
  31. freealg/_geometric_form/_sphere_maps.py +63 -0
  32. freealg/_geometric_form/_torus_maps.py +118 -0
  33. freealg/_geometric_form/geometric_form.py +1094 -0
  34. freealg/_util.py +56 -110
  35. freealg/distributions/__init__.py +7 -1
  36. freealg/distributions/_chiral_block.py +494 -0
  37. freealg/distributions/_deformed_marchenko_pastur.py +726 -0
  38. freealg/distributions/_deformed_wigner.py +386 -0
  39. freealg/distributions/_kesten_mckay.py +29 -15
  40. freealg/distributions/_marchenko_pastur.py +224 -95
  41. freealg/distributions/_meixner.py +47 -37
  42. freealg/distributions/_wachter.py +29 -17
  43. freealg/distributions/_wigner.py +27 -14
  44. freealg/visualization/__init__.py +12 -0
  45. freealg/visualization/_glue_util.py +32 -0
  46. freealg/visualization/_rgb_hsv.py +125 -0
  47. freealg-0.7.12.dist-info/METADATA +172 -0
  48. freealg-0.7.12.dist-info/RECORD +53 -0
  49. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/WHEEL +1 -1
  50. freealg/_decompress.py +0 -180
  51. freealg/_jacobi.py +0 -218
  52. freealg/_support.py +0 -85
  53. freealg/freeform.py +0 -967
  54. freealg-0.1.11.dist-info/METADATA +0 -140
  55. freealg-0.1.11.dist-info/RECORD +0 -24
  56. /freealg/{_damp.py → _free_form/_damp.py} +0 -0
  57. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/licenses/AUTHORS.txt +0 -0
  58. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/licenses/LICENSE.txt +0 -0
  59. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/top_level.txt +0 -0
freealg/_free_form/_density_util.py
@@ -0,0 +1,243 @@
+ # SPDX-FileCopyrightText: Copyright 2025, Siavash Ameli <sameli@berkeley.edu>
+ # SPDX-License-Identifier: BSD-3-Clause
+ # SPDX-FileType: SOURCE
+ #
+ # This program is free software: you can redistribute it and/or modify it under
+ # the terms of the license found in the LICENSE.txt file in the root directory
+ # of this source tree.
+
+
+ # =======
+ # Imports
+ # =======
+
+ import numpy
+ from scipy.stats import gaussian_kde
+ from scipy.stats import beta
+ # from statsmodels.nonparametric.kde import KDEUnivariate
+ from scipy.optimize import minimize
+ import matplotlib.pyplot as plt
+ import texplot
+ from ._plot_util import _auto_bins
+
+ # Fallback to previous API
+ if not hasattr(numpy, 'trapezoid'):
+     numpy.trapezoid = numpy.trapz
+
+ __all__ = ['kde', 'force_density']
+
+
+ # ===
+ # kde
+ # ===
+
+ def kde(eig, xs, lam_m, lam_p, h, kernel='beta', plot=False):
+     """
+     Kernel density estimation of eigenvalues.
+
+     Parameters
+     ----------
+
+     eig : numpy.array
+         1D array of samples of size `n`.
+
+     xs : numpy.array
+         1D array of evaluation grid (must lie within ``[lam_m, lam_p]``).
+
+     lam_m : float
+         Lower end of the support endpoints with ``lam_m < lam_p``.
+
+     lam_p : float
+         Upper end of the support endpoints with ``lam_m < lam_p``.
+
+     h : float
+         Kernel bandwidth in rescaled units where ``0 < h < 1``.
+
+     kernel : {``'gaussian'``, ``'beta'``}, default=``'beta'``
+         Kernel function using either Gaussian or Beta distribution.
+
+     plot : bool, default=False
+         If `True`, the KDE is plotted.
+
+     Returns
+     -------
+
+     pdf : numpy.ndarray
+         Probability distribution function with the same length as ``xs``.
+
+     See Also
+     --------
+
+     freealg.supp
+     freealg.sample
+
+     References
+     ----------
+
+     .. [1] `R-package documentation for Beta kernel
+            <https://search.r-project.org/CRAN/refmans/DELTD/html/Beta.html>`__
+
+     .. [2] Chen, S. X. (1999). Beta Kernel estimators for density functions.
+            *Computational Statistics and Data Analysis* 31 p. 131--145.
+
+     Notes
+     -----
+
+     In Beta kernel density estimation, the shape parameters :math:`a` and
+     :math:`b` of the :math:`\\mathrm{Beta}(a, b)` distribution are computed
+     for each data point :math:`u` as:
+
+     .. math::
+
+         a = (u / h) + 1.0
+         b = ((1.0 - u) / h) + 1.0
+
+     This is a standard way of using the Beta kernel (see the R-package
+     documentation [1]_). These equations are derived from the *moment
+     matching* method, where
+
+     .. math::
+
+         \\mathrm{Mean}(\\mathrm{Beta}(a,b)) = u
+         \\mathrm{Var}(\\mathrm{Beta}(a,b)) = (1-u) u h
+
+     Solving these two equations for :math:`a` and :math:`b` yields the
+     relations above. See [2]_ (page 134).
+     """
+
+     if kernel == 'gaussian':
+         pdf = gaussian_kde(eig, bw_method=h)(xs)
+
+         # Adaptive KDE
+         # k = KDEUnivariate(eig)
+         # k.fit(kernel='gau', bw='silverman', fft=False, weights=None,
+         #       gridsize=1024, adaptive=True)
+         # pdf = k.evaluate(xs)
+
+     elif kernel == 'beta':
+
+         span = lam_p - lam_m
+         if span <= 0:
+             raise ValueError('"lam_p" must be larger than "lam_m".')
+
+         # Map samples and grid to [0, 1]
+         u = (eig - lam_m) / span
+         t = (xs - lam_m) / span
+
+         # Keep only samples strictly inside (0, 1)
+         if (u.min() < 0) or (u.max() > 1):
+             u = u[(u > 0) & (u < 1)]
+
+         n = u.size
+         if n == 0:
+             return numpy.zeros_like(xs, dtype=float)
+
+         # Shape parameters "a" and "b" of the kernel Beta(a, b), computed for
+         # each data point "u" (see notes above). These are vectorized.
+         a = (u / h) + 1.0
+         b = ((1.0 - u) / h) + 1.0
+
+         # Tiny positive number to keep shape parameters > 0
+         eps = 1e-6
+         a = numpy.clip(a, eps, None)
+         b = numpy.clip(b, eps, None)
+
+         # Beta kernel
+         pdf_matrix = beta.pdf(t[None, :], a[:, None], b[:, None])
+
+         # Average and re-normalize back to x variable
+         pdf = pdf_matrix.sum(axis=0) / (n * span)
+
+         # Exact zeros outside [lam_m, lam_p]
+         pdf[(t < 0) | (t > 1)] = 0.0
+
+     else:
+         raise NotImplementedError('"kernel" is invalid.')
+
+     if plot:
+         with texplot.theme(use_latex=False):
+             fig, ax = plt.subplots(figsize=(6, 4))
+
+             x_min = numpy.min(xs)
+             x_max = numpy.max(xs)
+             bins = numpy.linspace(x_min, x_max, _auto_bins(eig))
+             _ = ax.hist(eig, bins, density=True, color='silver',
+                         edgecolor='none', label='Samples histogram')
+             ax.plot(xs, pdf, color='black', label='KDE')
+             ax.set_xlabel(r'$x$')
+             ax.set_ylabel(r'$\rho(x)$')
+             ax.set_xlim([xs[0], xs[-1]])
+             ax.set_ylim(bottom=0)
+             ax.set_title('Kernel Density Estimation')
+             ax.legend(fontsize='x-small')
+             plt.show()
+
+     return pdf
+
+
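As an aside for readers of this hunk, the standalone sketch below illustrates the Beta-kernel construction documented in the Notes of ``kde()``: one Beta(a, b) kernel per sample on the unit interval, with shape parameters from the moment-matching relations. It is not part of the package; it only assumes ``numpy`` and ``scipy`` and reuses the same ``trapezoid`` fallback as this module.

    # Illustrative sketch (not part of the diff): Beta-kernel KDE on [0, 1],
    # mirroring the kernel='beta' branch of kde() above.
    import numpy
    from scipy.stats import beta

    if not hasattr(numpy, 'trapezoid'):       # same fallback as the module above
        numpy.trapezoid = numpy.trapz

    rng = numpy.random.default_rng(0)
    u = rng.beta(2.0, 5.0, size=500)          # samples already inside (0, 1)
    t = numpy.linspace(0.0, 1.0, 201)         # evaluation grid
    h = 0.05                                  # bandwidth in rescaled units

    # Per-sample shape parameters from the moment-matching relations above
    a = (u / h) + 1.0
    b = ((1.0 - u) / h) + 1.0

    # One Beta(a_i, b_i) kernel per sample, averaged over the samples
    pdf = beta.pdf(t[None, :], a[:, None], b[:, None]).mean(axis=0)
    print(numpy.trapezoid(pdf, t))            # approximately 1.0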
+ # =============
+ # force density
+ # =============
+
+ def force_density(psi0, support, density, grid, alpha=0.0, beta=0.0):
+     """
+     Starting from psi0 (the raw projection), solve
+
+         min  0.5 ||psi - psi0||^2
+         s.t. density(grid, psi) >= 0                     (positivity on grid)
+              integral of density(grid, psi) equals 1     (unit mass)
+              density(lam_m, psi) = 0                     (zero at left edge)
+              density(lam_p, psi) = 0                     (zero at right edge)
+     """
+
+     lam_m, lam_p = support
+
+     # Objective and gradient
+     def fun(psi):
+         return 0.5 * numpy.dot(psi-psi0, psi-psi0)
+
+     def grad(psi):
+         return psi - psi0
+
+     # Constraints:
+     constraints = []
+
+     # Enforce positivity
+     constraints.append({'type': 'ineq',
+                         'fun': lambda psi: density(grid, psi)})
+
+     # Enforce unit mass
+     constraints.append({
+         'type': 'eq',
+         'fun': lambda psi: numpy.trapezoid(density(grid, psi), grid) - 1.0
+     })
+
+     # Enforce zero at left edge
+     if beta <= 0.0 and beta > -0.5:
+         constraints.append({
+             'type': 'eq',
+             'fun': lambda psi: density(numpy.array([lam_m]), psi)[0]
+         })
+
+     # Enforce zero at right edge
+     if alpha <= 0.0 and alpha > -0.5:
+         constraints.append({
+             'type': 'eq',
+             'fun': lambda psi: density(numpy.array([lam_p]), psi)[0]
+         })
+
+     # Solve a small quadratic program
+     res = minimize(fun, psi0, jac=grad,
+                    constraints=constraints,
+                    # method='trust-constr',
+                    method='SLSQP',
+                    options={'maxiter': 1000, 'ftol': 1e-9, 'eps': 1e-8})
+
+     psi = res.x
+
+     # Normalize first mode to unit mass
+     x = numpy.linspace(lam_m, lam_p, 1000)
+     rho = density(x, psi)
+     mass = numpy.trapezoid(rho, x)
+     psi[0] = psi[0] / mass
+
+     return psi
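The ``density`` argument of ``force_density()`` is a callable of the form ``density(grid, psi)``. The sketch below shows one hypothetical way to build it by wrapping ``jacobi_density()`` from ``_jacobi.py`` (shown further below); how freealg itself wires these pieces internally is not visible in this hunk, and the direct imports of the internal modules here are only for illustration.

    # Illustrative sketch (not part of the diff), assuming freealg 0.7.12 is
    # installed; imports internal modules directly for demonstration only.
    import numpy
    from freealg._free_form._jacobi import jacobi_density, jacobi_sample_proj
    from freealg._free_form._density_util import force_density

    support = (-2.0, 2.0)
    alpha, beta = 0.0, 0.0

    eig = numpy.random.default_rng(0).uniform(-1.8, 1.8, size=2000)
    psi0 = jacobi_sample_proj(eig, support, K=8, alpha=alpha, beta=beta)

    def density(grid, psi):
        # The callable shape force_density() expects: density(grid, psi)
        return jacobi_density(grid, psi, support, alpha=alpha, beta=beta)

    grid = numpy.linspace(support[0], support[1], 200)
    psi = force_density(psi0, support, density, grid, alpha=alpha, beta=beta)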
freealg/_free_form/_jacobi.py
@@ -0,0 +1,359 @@
+ # SPDX-FileCopyrightText: Copyright 2025, Siavash Ameli <sameli@berkeley.edu>
+ # SPDX-License-Identifier: BSD-3-Clause
+ # SPDX-FileType: SOURCE
+ #
+ # This program is free software: you can redistribute it and/or modify it under
+ # the terms of the license found in the LICENSE.txt file in the root directory
+ # of this source tree.
+
+
+ # =======
+ # Imports
+ # =======
+
+ import numpy
+ from scipy.special import eval_jacobi, roots_jacobi
+ from scipy.special import gammaln, beta as Beta
+ from ._series import wynn_epsilon, wynn_rho, levin_u, weniger_delta, \
+     brezinski_theta
+
+ __all__ = ['jacobi_sample_proj', 'jacobi_kernel_proj', 'jacobi_density',
+            'jacobi_stieltjes']
+
+
+ # ==============
+ # jacobi sq norm
+ # ==============
+
+ def jacobi_sq_norm(k, alpha, beta):
+     """
+     Squared norm of the Jacobi polynomial P_k with respect to the Jacobi
+     weight. The case k = 0 is special-cased to avoid gamma(0) issues when
+     alpha + beta + 1 = 0.
+     """
+
+     if k == 0:
+         return 2.0**(alpha + beta + 1) * Beta(alpha + 1, beta + 1)
+
+     # Use logs instead to avoid overflow in gamma function.
+     lg_num = (alpha + beta + 1) * numpy.log(2.0) \
+         + gammaln(k + alpha + 1) \
+         + gammaln(k + beta + 1)
+
+     lg_den = numpy.log(2*k + alpha + beta + 1) \
+         + gammaln(k + 1) \
+         + gammaln(k + alpha + beta + 1)
+
+     return numpy.exp(lg_num - lg_den)
+
+
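The log-gamma trick used in ``jacobi_sq_norm()`` keeps ratios of Gamma values finite even when each factor overflows. A minimal standalone sketch (not part of the package, assuming only ``numpy`` and ``scipy``):

    # Illustrative sketch: the same Gamma ratio computed directly (overflows)
    # and in log space via gammaln (stable).
    import numpy
    from scipy.special import gamma, gammaln

    k, alpha, beta = 200, 0.5, 0.5
    direct = gamma(k + alpha + 1) * gamma(k + beta + 1) \
        / (gamma(k + 1) * gamma(k + alpha + beta + 1))     # inf / inf -> nan
    stable = numpy.exp(gammaln(k + alpha + 1) + gammaln(k + beta + 1)
                       - gammaln(k + 1) - gammaln(k + alpha + beta + 1))
    print(direct, stable)   # nan vs. a finite value close to 1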
+ # ==================
+ # jacobi sample proj
+ # ==================
+
+ def jacobi_sample_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
+     """
+     Project eigenvalue samples onto the Jacobi polynomial basis, returning
+     the expansion coefficients psi computed from empirical moments.
+     """
+
+     lam_m, lam_p = support
+
+     # Convert to [-1, 1] interval
+     x = (2.0 * eig - (lam_p + lam_m)) / (lam_p - lam_m)
+
+     psi = numpy.empty(K + 1)
+
+     # Empirical moments and coefficients
+     for k in range(K + 1):
+         moment = numpy.mean(eval_jacobi(k, alpha, beta, x))
+         N_k = jacobi_sq_norm(k, alpha, beta)  # normalization
+
+         if k == 0:
+             # Do not penalize at k=0, as this keeps unit mass.
+             # k=0 has unit mass, while k>0 has zero mass by orthogonality.
+             penalty = 0
+         else:
+             penalty = reg * (k / (K + 1))**2
+
+         # Add regularization on the diagonal
+         psi[k] = moment / (N_k + penalty)
+
+     return psi
+
+
+ # ==================
+ # jacobi kernel proj
+ # ==================
+
+ def jacobi_kernel_proj(xs, pdf, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
+     """
+     Same moments as `jacobi_sample_proj`, but the target is a *continuous*
+     density given on a grid (xs, pdf).
+     """
+
+     lam_m, lam_p = support
+     t = (2.0 * xs - (lam_p + lam_m)) / (lam_p - lam_m)  # map to [-1,1]
+     psi = numpy.empty(K + 1)
+
+     for k in range(K + 1):
+         Pk = eval_jacobi(k, alpha, beta, t)
+         N_k = jacobi_sq_norm(k, alpha, beta)
+
+         # \int P_k(t) w(t) \rho(t) dt. w(t) cancels with pdf already being rho
+         moment = numpy.trapz(Pk * pdf, xs)
+
+         if k == 0:
+             penalty = 0
+         else:
+             penalty = reg * (k / (K + 1))**2
+
+         psi[k] = moment / (N_k + penalty)
+
+     return psi
+
+
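The projection above pairs with the reconstruction in ``jacobi_density()`` (defined further below). A standalone round-trip sketch, not part of the package, using the internal module directly for illustration; with alpha = beta = 0 the expansion reduces to a plain Legendre series:

    # Illustrative sketch: project a known density and reconstruct it.
    import numpy
    from freealg._free_form._jacobi import jacobi_kernel_proj, jacobi_density

    support = (-2.0, 2.0)
    xs = numpy.linspace(support[0], support[1], 2001)
    pdf = numpy.sqrt(numpy.maximum(4.0 - xs**2, 0.0)) / (2.0 * numpy.pi)  # semicircle

    psi = jacobi_kernel_proj(xs, pdf, support, K=12, alpha=0.0, beta=0.0)
    pdf_rec = jacobi_density(xs, psi, support, alpha=0.0, beta=0.0)
    print(numpy.max(numpy.abs(pdf_rec - pdf)))   # truncation error; decays with K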
+ # ==============
+ # jacobi density
+ # ==============
+
+ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
+     """
+     Reconstruct Jacobi approximation of density.
+
+     Parameters
+     ----------
+
+     x : array_like
+         Points (in original eigenvalue scale) to evaluate at.
+
+     psi : array_like, shape (K+1, )
+         Jacobi expansion coefficients.
+
+     support : tuple (lam_m, lam_p)
+
+     alpha : float
+         Jacobi parameter.
+
+     beta : float
+         Jacobi parameter.
+
+     Returns
+     -------
+
+     rho : ndarray
+         Density evaluated at ``x``.
+     """
+
+     lam_m, lam_p = support
+     t = (2 * x - (lam_p + lam_m)) / (lam_p - lam_m)
+     w = (1 - t)**alpha * (1 + t)**beta
+
+     # The function eval_jacobi does not accept the complex256 type.
+     down_cast = False
+     if numpy.issubdtype(t.dtype, numpy.complexfloating) and \
+             t.itemsize > numpy.dtype(numpy.complex128).itemsize:
+         orig_dtype = t.dtype
+         t = t.astype(numpy.complex128)
+         down_cast = True
+
+     P = numpy.vstack([eval_jacobi(k, alpha, beta, t) for k in range(len(psi))])
+
+     rho_t = w * (psi @ P)                    # density in t-variable
+     rho_x = rho_t * (2.0 / (lam_p - lam_m))  # back to x-variable
+
+     # Cast back up to the original dtype (e.g. complex256)
+     if down_cast:
+         rho_x = rho_x.astype(orig_dtype)
+
+     return rho_x
+
+
+ # ================
+ # jacobi stieltjes
+ # ================
+
+ def jacobi_stieltjes(z, cache, psi, support, alpha=0.0, beta=0.0, n_quad=None,
+                      continuation='pade', dtype=numpy.complex128):
+     """
+     Compute m(z) = sum_k psi_k * m_k(z) where
+
+     .. math::
+
+         m_k(z) = \\int \\frac{w^{(\\alpha, \\beta)}(t) P_k^{(\\alpha, \\beta)}(t)}{
+             u(z) - t} \\mathrm{d} t
+
+     All m_k are evaluated with a shared Gauss-Jacobi rule; its nodes, weights,
+     and integrands are cached across calls through ``cache``.
+
+     Parameters
+     ----------
+
+     z : complex or ndarray
+
+     cache : dict
+         Pass a dict to enable cross-call caching.
+
+     psi : (K+1,) array_like
+
+     support : (lambda_minus, lambda_plus)
+
+     alpha, beta : float
+
+     n_quad : int, default=None
+         Number of Gauss-Jacobi quadrature points.
+
+     continuation : str, default=``'pade'``
+         Method of analytic continuation.
+
+     dtype : numpy.dtype, default=numpy.complex128
+         Data type for complex arrays. This might enhance series acceleration.
+
+     Returns
+     -------
+
+     m : ndarray
+         Same shape as ``z``.
+     """
+
+     if not isinstance(cache, dict):
+         raise TypeError('"cache" must be a dict; pass a persistent dict '
+                         '(e.g., self.cache).')
+
+     # Number of quadratures
+     if 'n_quad' not in cache:
+         if n_quad is None:
+             # Set the number of quadrature points based on the Bernstein
+             # ellipse, using an evaluation point at distance delta from the
+             # support, to achieve a quadrature error below tol.
+             tol = 1e-16
+             delta = 1e-2
+             n_quad = int(-numpy.log(tol) / (2.0 * numpy.sqrt(delta)))
+             n_quad = max(n_quad, psi.size)
+         cache['n_quad'] = n_quad
+     else:
+         n_quad = cache['n_quad']
+
+     # Quadrature nodes and weights
+     if ('t_nodes' not in cache) or ('w_nodes' not in cache):
+         t_nodes, w_nodes = roots_jacobi(n_quad, alpha, beta)  # (n_quad,)
+         cache['t_nodes'] = t_nodes
+         cache['w_nodes'] = w_nodes
+     else:
+         t_nodes = cache['t_nodes']
+         w_nodes = cache['w_nodes']
+
+     z = numpy.asarray(z, dtype=dtype)
+     lam_minus, lam_plus = support
+     span = lam_plus - lam_minus
+     centre = 0.5 * (lam_plus + lam_minus)
+
+     # Map z to u in the standard [-1, 1] domain
+     u = (2.0 / span) * (z - centre)
+
+     # Cauchy kernel (flattened for all z)
+     u_flat = u.ravel()
+     ker = (1.0 / (t_nodes[:, None] - u_flat[None, :])).astype(
+         dtype, copy=False)  # (n_quad, Ny*Nx)
+
+     if continuation == 'pade':
+
+         if 'integrand_nodes' not in cache:
+
+             # Compute sum_k psi_k P_k (call it s_nodes)
+             s_nodes = numpy.zeros_like(t_nodes, dtype=dtype)
+             for k, psi_k in enumerate(psi):
+
+                 # Evaluate P_k at the quadrature nodes
+                 P_k_nodes = eval_jacobi(k, alpha, beta, t_nodes)  # (n_quad,)
+                 s_nodes += psi_k * P_k_nodes
+
+             integrand_nodes = (2.0 / span) * (w_nodes * s_nodes).astype(dtype)
+             cache['integrand_nodes'] = integrand_nodes
+
+         else:
+             integrand_nodes = cache['integrand_nodes']
+
+         Q_flat = (integrand_nodes[:, None] * ker).sum(axis=0)
+         m_total = Q_flat.reshape(z.shape)
+
+         return m_total
+
+     else:
+
+         # Continuation is not Pade. This is one of the Wynn, Levin, etc.
+         # methods, which need the partial sums of the series over
+         # k = 0, ..., K.
+
+         if 'B' not in cache:
+             # All P_k at quadrature nodes (real), row-scaled by weights
+             P_nodes = numpy.empty((psi.size, n_quad), dtype=w_nodes.dtype)
+             for k in range(psi.size):
+                 P_nodes[k, :] = eval_jacobi(k, alpha, beta, t_nodes)
+
+             # All P_k * w, shape (K+1, n_quad)
+             B = (2.0 / span) * (P_nodes * w_nodes[None, :]).astype(
+                 dtype, copy=False)
+             cache['B'] = B
+
+         else:
+             B = cache['B']
+
+         # Principal branch. 2D matrix for all k
+         m_k_all = B @ ker
+
+         # Compute m on the secondary branch from the principal branch via
+         # m_k = m_k + 2 \pi i rho_k(z), where rho_k(z) is the analytic
+         # extension of rho_k(x) using the k-th basis; that is, rho_k(z) is
+         # w(z) * P_k(z).
+
+         # Lower-half-plane jump for ALL k at once (vectorized)
+         mask_m = (z.imag <= 0)
+         if numpy.any(mask_m):
+             idx = numpy.flatnonzero(mask_m.ravel())
+             u_m = u_flat[idx].astype(dtype, copy=False)  # complex
+
+             # Scipy's eval_jacobi tops out at the complex128 type. If u_m is
+             # complex256, downcast to complex128.
+             if u_m.dtype.itemsize > numpy.dtype(numpy.complex128).itemsize:
+                 u_m_eval = u_m.astype(numpy.complex128, copy=False)
+                 down_cast = True
+             else:
+                 u_m_eval = u_m
+                 down_cast = False
+
+             # P_k at complex u_m (for all k = 0, ..., K)
+             P_all_m = numpy.empty((psi.size, u_m.size), dtype=dtype)
+             for k in range(psi.size):
+                 P_all_m[k, :] = eval_jacobi(k, alpha, beta, u_m_eval)
+
+             # Jacobi weight. Must match jacobi_density's branch
+             w_m = numpy.power(1.0 - u_m, alpha) * numpy.power(1.0 + u_m, beta)
+
+             # rho_k(z) in x-units is (2/span) * w(u) * P_k(u)
+             rho_all = ((2.0 / span) * w_m[None, :] * P_all_m).astype(
+                 dtype, copy=False)
+
+             if down_cast:
+                 rho_all = rho_all.astype(dtype)
+
+             # Add the jump (2 \pi i) rho_k(z), the analytic extension of the
+             # k-th Jacobi basis w(z) P_k(z) into the lower half-plane.
+             m_k_all[:, idx] = m_k_all[:, idx] + (2.0 * numpy.pi * 1j) * rho_all
+
+         # Partial sums S_k = sum_{j<=k} psi_j * m_j
+         WQ = (psi[:, None].astype(dtype, copy=False) * m_k_all)
+         m_partial = numpy.cumsum(WQ, axis=0)
+
+         if continuation == 'wynn-eps':
+             S = wynn_epsilon(m_partial)
+         elif continuation == 'wynn-rho':
+             S = wynn_rho(m_partial)
+         elif continuation == 'levin':
+             S = levin_u(m_partial)
+         elif continuation == 'weniger':
+             S = weniger_delta(m_partial)
+         elif continuation == 'brezinski':
+             S = brezinski_theta(m_partial)
+         else:
+             # No acceleration (likely diverges in the lower-half plane)
+             S = m_partial[-1, :]
+
+         m_total = S.reshape(z.shape)
+         return m_total
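The core of ``jacobi_stieltjes()`` is a Gauss-Jacobi rule applied to the Cauchy kernel. A standalone sketch of that quadrature step, not part of the package and using only ``numpy`` and ``scipy``; the point ``z``, the rule size, and the reference integration are chosen here for illustration:

    # Illustrative sketch: roots_jacobi(n, alpha, beta) returns nodes t_i and
    # weights w_i with  sum_i w_i f(t_i) ~ int_{-1}^{1} (1-t)^a (1+t)^b f(t) dt,
    # so f(t) = 1/(t - z) gives the Stieltjes transform of the Jacobi weight.
    import numpy
    from scipy.special import roots_jacobi
    from scipy.integrate import quad

    alpha, beta = 0.5, 0.5
    z = 0.3 + 0.2j                      # evaluation point off the support
    t, w = roots_jacobi(80, alpha, beta)
    m_quad = numpy.sum(w / (t - z))

    # Reference value by direct numerical integration (real/imaginary split)
    f = lambda x: (1 - x)**alpha * (1 + x)**beta / (x - z)
    re = quad(lambda x: f(x).real, -1, 1, limit=200)[0]
    im = quad(lambda x: f(x).imag, -1, 1, limit=200)[0]
    print(m_quad, re + 1j * im)         # the two values agree to many digits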