freealg-0.5.4-py3-none-any.whl → freealg-0.6.1-py3-none-any.whl
- freealg/__init__.py +4 -1
- freealg/__version__.py +1 -1
- freealg/_chebyshev.py +31 -26
- freealg/_decompress.py +20 -15
- freealg/_jacobi.py +140 -53
- freealg/_pade.py +10 -10
- freealg/_sample.py +42 -18
- freealg/_series.py +9 -9
- freealg/_support.py +35 -22
- freealg/_util.py +138 -35
- freealg/distributions/_kesten_mckay.py +1 -1
- freealg/distributions/_marchenko_pastur.py +1 -1
- freealg/distributions/_meixner.py +2 -2
- freealg/distributions/_wachter.py +1 -1
- freealg/freeform.py +98 -69
- {freealg-0.5.4.dist-info → freealg-0.6.1.dist-info}/METADATA +1 -1
- freealg-0.6.1.dist-info/RECORD +26 -0
- freealg-0.5.4.dist-info/RECORD +0 -26
- {freealg-0.5.4.dist-info → freealg-0.6.1.dist-info}/WHEEL +0 -0
- {freealg-0.5.4.dist-info → freealg-0.6.1.dist-info}/licenses/AUTHORS.txt +0 -0
- {freealg-0.5.4.dist-info → freealg-0.6.1.dist-info}/licenses/LICENSE.txt +0 -0
- {freealg-0.5.4.dist-info → freealg-0.6.1.dist-info}/top_level.txt +0 -0
freealg/__init__.py
CHANGED
@@ -8,9 +8,12 @@
 
 from .freeform import FreeForm
 from ._linalg import eigvalsh, cond, norm, trace, slogdet
+from ._support import supp
+from ._sample import sample
+from ._util import kde
 from . import distributions
 
 __all__ = ['FreeForm', 'distributions', 'eigvalsh', 'cond', 'norm', 'trace',
-           'slogdet']
+           'slogdet', 'supp', 'sample', 'kde']
 
 from .__version__ import __version__  # noqa: F401 E402
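The practical effect of this hunk is that `supp`, `sample`, and `kde` join the public top-level API alongside the existing exports. A minimal usage sketch (imports only; the call signatures of these functions are not shown in this diff and are not assumed here):

```python
# Names newly re-exported at the package top level in 0.6.1.
from freealg import FreeForm, supp, sample, kde
```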
freealg/__version__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.5.4"
+__version__ = "0.6.1"
freealg/_chebyshev.py
CHANGED
@@ -43,10 +43,10 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
         The assumed compact support of rho.
 
     K : int
-        Highest Chebyshev
+        Highest Chebyshev-II order.
 
     reg : float
-        Tikhonov
+        Tikhonov-style ridge on each coefficient (defaults to 0).
 
     Returns
     -------
@@ -57,10 +57,10 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
 
     lam_m, lam_p = support
 
-    # Map to [
+    # Map to [-1,1] interval
     t = (2 * eig - (lam_m + lam_p)) / (lam_p - lam_m)
 
-    # Inner
+    # Inner-product norm of each U_k under w(t) = sqrt{1-t^2} is \\pi/2
     norm = numpy.pi / 2
 
     psi = numpy.empty(K+1)
@@ -92,12 +92,12 @@ def chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0):
     Projection of a *continuous* density given on a grid (xs, pdf)
     onto the Chebyshev-II basis.
 
-    xs : 1-D numpy array (original x
+    xs : 1-D numpy array (original x-axis, not the t-variable)
     pdf : same shape as xs, integrates to 1 on xs
     """
 
     lam_m, lam_p = support
-    t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m)  # map to [
+    t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m)  # map to [-1,1]
 
     norm = numpy.pi / 2.0
     psi = numpy.empty(K + 1)
@@ -140,15 +140,15 @@ def chebyshev_density(x, psi, support):
     -------
 
     rho_x : ndarray, same shape as x
-        Approximated spectral density on the original x
+        Approximated spectral density on the original x-axis.
     """
 
     lam_m, lam_p = support
 
-    # Map to [
+    # Map to [-1,1] interval
     t = (2 * numpy.asarray(x) - (lam_m + lam_p)) / (lam_p - lam_m)
 
-    # Weight sqrt{1
+    # Weight sqrt{1-t^2} (clip for numerical safety)
     w = numpy.sqrt(numpy.clip(1 - t**2, a_min=0, a_max=None))
 
     # Summation approximation
@@ -165,22 +165,23 @@ def chebyshev_density(x, psi, support):
 # chebushev stieltjes
 # ===================
 
-def chebyshev_stieltjes(z, psi, support, continuation='pade'
+def chebyshev_stieltjes(z, psi, support, continuation='pade',
+                        dtype=numpy.complex128):
     """
-    Compute the Stieltjes transform m(z) for a Chebyshev
+    Compute the Stieltjes transform m(z) for a Chebyshev-II expansion
 
-        rho(x) = (2/(lam_p - lam_m)) * sqrt(1
+        rho(x) = (2/(lam_p - lam_m)) * sqrt(1-t(x)^2) * sum_{k=0}^K psi_k U_k(t(x))
 
-    via the closed
+    via the closed-form
 
-        \\int_{-1}^1 U_k(t) sqrt(1
+        \\int_{-1}^1 U_k(t) sqrt(1-t^2)/(u - t) dt = \\pi J(u)^(k+1),
 
     where
 
-        u = (2(z
+        u = (2(z-center))/span,
         center = (lam_p + lam_m)/2,
         span = lam_p - lam_m,
-        J(u) = u
+        J(u) = u - sqrt(u^2-1)
 
     and then
 
@@ -193,7 +194,7 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
         Points in the complex plane.
 
     psi : array_like, shape (K+1,)
-        Chebyshev
+        Chebyshev-II coefficients \\psi.
 
     support : tuple
         The support interval of the original density.
@@ -201,6 +202,9 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
     continuation : str, default= ``'pade'``
         Methof of analytiv continuation.
 
+    dtype : numpy.type, default=numpy.complex128
+        Data type for complex arrays. This might enhance series acceleration.
+
     Returns
     -------
 
@@ -208,12 +212,13 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
         The Stieltjes transform m(z) on the same shape as z.
     """
 
-    z = numpy.asarray(z, dtype=
+    z = numpy.asarray(z, dtype=dtype)
+
     lam_m, lam_p = support
     span = lam_p - lam_m
     center = 0.5 * (lam_m + lam_p)
 
-    # Map z
+    # Map z to u in the standard [-1,1] domain
    u = (2.0 * (z - center)) / span
 
     # Inverse-Joukowski: pick branch sqrt with +Im
@@ -227,16 +232,16 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
 
     # This depends on the method of analytic continuation
     if continuation == 'pade':
-        #
+        # Horner summation for S0(J) = sum_{k=0}^K psi_k * J**k
         K = len(psi) - 1
-
-
-
-        S =
+        S0 = numpy.zeros_like(J)
+        for k in range(K, -1, -1):
+            S0 = psi[k] + J * S0
+        S = J * S0
 
     else:
-        # Flatten J before passing to
-        psi_zero = numpy.concatenate([[0], psi])
+        # Flatten J before passing to any of the acceleration methods.
+        psi_zero = numpy.concatenate([[0.0], psi])
         Sn = partial_sum(psi_zero, J.ravel(), p=0)
 
         if continuation == 'wynn-eps':
freealg/_decompress.py
CHANGED
@@ -567,7 +567,8 @@ def _newton_method(f, z_init, a, support, enforce_wall=False, tol=1e-4,
 
 def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
                     alpha=0.5, max_bt=1, eps=1e-30, step_factor=5.0,
-                    post_smooth=True, jump_tol=10.0,
+                    post_smooth=True, jump_tol=10.0, dtype=numpy.complex128,
+                    verbose=False):
     """
     Solves :math:``f(z) = a`` for many starting points simultaneously using the
     secant method in the complex plane.
@@ -581,7 +582,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
         Two initial guesses. ``z1`` may be broadcast to ``z0``.
 
     a : complex or array_like, optional
-        Right
+        Right-hand-side targets (broadcasted to ``z0``). Defaults to ``0+0j``.
 
     tol : float, optional
         Convergence criterion on ``|f(z) - a|``. Defaults to ``1e-12``.
@@ -590,10 +591,10 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
         Maximum number of secant iterations. Defaults to ``100``.
 
     alpha : float, optional
-        Back
+        Back-tracking shrink factor (``0 < alpha < 1``). Defaults to ``0.5``.
 
     max_bt : int, optional
-        Maximum back
+        Maximum back-tracking trials per iteration. Defaults to ``0``.
 
     eps : float, optional
         Safeguard added to tiny denominators. Defaults to ``1e-30``.
@@ -607,6 +608,9 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
         Sensitivity of the clean-up pass; larger tolerance implies fewer
         re-solves.
 
+    dtype : {``'complex128'``, ``'complex256'``}, default = ``'complex128'``
+        Data type for inner computations of complex variables.
+
     verbose : bool, optional
         If *True*, prints progress every 10 iterations.
 
@@ -622,9 +626,9 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
 
     # Broadcast inputs
     z0, z1, a = numpy.broadcast_arrays(
-        numpy.asarray(z0,
-        numpy.asarray(z1,
-        numpy.asarray(a,
+        numpy.asarray(z0, dtype=dtype),
+        numpy.asarray(z1, dtype=dtype),
+        numpy.asarray(a, dtype=dtype),
     )
     orig_shape = z0.shape
     z0, z1, a = (x.ravel() for x in (z0, z1, a))
@@ -677,7 +681,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
         if not worse.any():
             break
 
-        # Book
+        # Book-keeping
         newly_conv = (numpy.abs(f2) < tol) & active
         converged[newly_conv] = True
         iterations[newly_conv] = k + 1
@@ -691,7 +695,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
        if verbose and k % 10 == 0:
            print(f"Iter {k}: {converged.sum()} / {n_points} converged")
 
-    # Non
+    # Non-converged points
     remaining = ~converged
     roots[remaining] = z1[remaining]
     residuals[remaining] = numpy.abs(f1[remaining])
@@ -723,7 +727,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
         new_root, new_res, new_iter = _secant_complex(
             f, z_first, z_second, a[bad], tol=tol, max_iter=max_iter,
             alpha=alpha, max_bt=max_bt, eps=eps, step_factor=step_factor,
-            post_smooth=False,  # avoid recursion
+            dtype=dtype, post_smooth=False,  # avoid recursion
         )
 
         roots[bad] = new_root
@@ -894,7 +898,7 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
     # Initialize roots below the real axis
     if roots_init is None:
         roots_init = numpy.full(x.shape, numpy.mean(freeform.support) - 0.1j,
-                                dtype=
+                                dtype=freeform.dtype)
 
     # Finding roots
     if method == 'newton':
@@ -925,11 +929,11 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
 
     elif method == 'secant':
         z0 = numpy.full(x.shape, numpy.mean(freeform.support) + 0.1j,
-                        dtype=
+                        dtype=freeform.dtype)
         z1 = z0 - 0.2j
 
         roots, _, _ = _secant_complex(_char_z, z0, z1, a=target, tol=tolerance,
-                                      max_iter=max_iter)
+                                      max_iter=max_iter, dtype=freeform.dtype)
     else:
         raise NotImplementedError('"method" is invalid.')
 
@@ -960,7 +964,8 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
 # =======================
 
 def reverse_characteristics(freeform, z_inits, T, iterations=500,
-                            step_size=0.1, tolerance=1e-8
+                            step_size=0.1, tolerance=1e-8,
+                            dtype=numpy.complex128):
     """
     """
 
@@ -975,7 +980,7 @@ def reverse_characteristics(freeform, z_inits, T, iterations=500,
     target_z, target_t = numpy.meshgrid(z_inits, t_eval)
 
     z = numpy.full(target_z.shape, numpy.mean(freeform.support) - 0.1j,
-                   dtype=
+                   dtype=dtype)
 
     # Broken Newton steps can produce a lot of warnings. Removing them for now.
     with numpy.errstate(all='ignore'):
freealg/_jacobi.py
CHANGED
@@ -97,7 +97,7 @@ def jacobi_kernel_proj(xs, pdf, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
         Pk = eval_jacobi(k, alpha, beta, t)
         N_k = jacobi_sq_norm(k, alpha, beta)
 
-        #
+        # \int P_k(t) w(t) \rho(t) dt. w(t) cancels with pdf already being rho
         moment = numpy.trapz(Pk * pdf, xs)
 
         if k == 0:
@@ -144,11 +144,23 @@ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
     lam_m, lam_p = support
     t = (2 * x - (lam_p + lam_m)) / (lam_p - lam_m)
     w = (1 - t)**alpha * (1 + t)**beta
+
+    # The function eval_jacobi does not accept complex256 type
+    down_cast = False
+    if numpy.issubdtype(t.dtype, numpy.complexfloating) and \
+            t.itemsize > numpy.dtype(numpy.complex128).itemsize:
+        t = t.astype(numpy.complex128)
+        down_cast = True
+
     P = numpy.vstack([eval_jacobi(k, alpha, beta, t) for k in range(len(psi))])
 
     rho_t = w * (psi @ P)                    # density in t-variable
     rho_x = rho_t * (2.0 / (lam_p - lam_m))  # back to x-variable
 
+    # Case up to complex256
+    if down_cast:
+        rho_x = rho_x.astype(t.dtype)
+
     return rho_x
 
 
@@ -156,33 +168,41 @@ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
 # jacobi stieltjes
 # ================
 
-def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0,
-                     continuation='pade'):
+def jacobi_stieltjes(z, cache, psi, support, alpha=0.0, beta=0.0, n_quad=None,
+                     continuation='pade', dtype=numpy.complex128):
     """
     Compute m(z) = sum_k psi_k * m_k(z) where
 
-
+    .. math::
 
-
-
+        m_k(z) = \\int \\frac{w^{(alpha, beta)}(t) P_k^{(alpha, beta)}(t)}{
+            (u(z)-t)} \\mathrm{d} t
+
+    Each m_k is evaluated *separately* with a Gauss-Jacobi rule sized
+    for that k. This follows the user's request: 1 quadrature rule per P_k.
 
     Parameters
     ----------
 
     z : complex or ndarray
 
+    cache : dict
+        Pass a dict to enable cross-call caching.
+
     psi : (K+1,) array_like
 
     support : (lambda_minus, lambda_plus)
 
     alpha, beta : float
 
-
-
-        n_quad = max(n_base, k+1).
+    n_quad : int, default=None
+        Number of Gauss-Jacobi quadrature points.
 
     continuation : str, default= ``'pade'``
-
+        Method of analytic continuation.
+
+    dtype : numpy.type, default=numpy.complex128
+        Data type for complex arrays. This might enhance series acceleration.
 
     Returns
     -------
@@ -194,67 +214,132 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
         Same shape as z
     """
 
-
+    if not isinstance(cache, dict):
+        raise TypeError('"cache" must be a dict; pass a persistent dict '
+                        '(e.g., self.cache).')
+
+    # Number of quadratures
+    if 'n_quad' not in cache:
+        if n_quad is None:
+            # Set number of quadratures based on Bernstein ellipse. Here using
+            # an evaluation point a with distance delta from support, to
+            # achieve the quadrature error below tol.
+            tol = 1e-16
+            delta = 1e-2
+            n_quad = int(-numpy.log(tol) / (2.0 * numpy.sqrt(delta)))
+            n_quad = max(n_quad, psi.size)
+        cache['n_quad'] = n_quad
+    else:
+        n_quad = cache['n_quad']
+
+    # Quadrature nodes and weights
+    if ('t_nodes' not in cache) or ('w_nodes' not in cache):
+        t_nodes, w_nodes = roots_jacobi(n_quad, alpha, beta)  # (n_quad,)
+        cache['t_nodes'] = t_nodes
+        cache['w_nodes'] = w_nodes
+    else:
+        t_nodes = cache['t_nodes']
+        w_nodes = cache['w_nodes']
+
+    z = numpy.asarray(z, dtype=dtype)
     lam_minus, lam_plus = support
     span = lam_plus - lam_minus
     centre = 0.5 * (lam_plus + lam_minus)
 
-    # Map z
+    # Map z to u in the standard [-1,1] domain
     u = (2.0 / span) * (z - centre)
 
-
+    # Cauchy Kernel (flattened for all z)
+    u_flat = u.ravel()
+    ker = (1.0 / (t_nodes[:, None] - u_flat[None, :])).astype(
+        dtype, copy=False)  # (n_quad, Ny*Nx)
+
+    if continuation == 'pade':
 
-
-        # Stores m with the ravel size of z.
-        m_partial = numpy.zeros((psi.size, z.size), dtype=numpy.complex128)
+        if 'integrand_nodes' not in cache:
 
-
-
-
-        t_nodes, w_nodes = roots_jacobi(n_quad, alpha, beta)  # (n_quad,)
+            # Compute sum_k psi_k P_k (call it s_node)
+            s_nodes = numpy.zeros_like(t_nodes, dtype=dtype)
+            for k, psi_k in enumerate(psi):
 
-
-
+                # Evaluate P_k at the quadrature nodes
+                P_k_nodes = eval_jacobi(k, alpha, beta, t_nodes)  # (n_quad,)
+                s_nodes += psi_k * P_k_nodes
 
-
-
+            integrand_nodes = (2.0 / span) * (w_nodes * s_nodes).astype(dtype)
+            cache['integrand_nodes'] = integrand_nodes
 
-
-
-        Q_k = (integrand[:, None, None] / diff).sum(axis=0)
+        else:
+            integrand_nodes = cache['integrand_nodes']
 
-
-
+        Q_flat = (integrand_nodes[:, None] * ker).sum(axis=0)
+        m_total = Q_flat.reshape(z.shape)
 
-
-    if continuation != 'pade':
+        return m_total
 
-
-        # when rho is just the k-th Jacobi basis: w(z) P_k(z). FOr this,
-        # we create a psi array (called unit_psi_j), with all zeros, except
-        # its k-th element is one. Ten we call jacobi_density.
-        unit_psi_k = numpy.zeros_like(psi)
-        unit_psi_k[k] = 1.0
+    else:
 
-
-
-        z_m = z[mask_m]
+        # Continuation is not Pade. This is one of Wynn, Levin, etc. These
+        # methods need the series for m for 1, ..., k.
 
-
-
-
+        if 'B' not in cache:
+            # All P_k at quadrature nodes (real), row-scale by weights
+            P_nodes = numpy.empty((psi.size, n_quad), dtype=w_nodes.dtype)
+            for k in range(psi.size):
+                P_nodes[k, :] = eval_jacobi(k, alpha, beta, t_nodes)
 
-        #
-
-
+            # All P_k * w shape (K+1, n_quad)
+            B = (2.0 / span) * (P_nodes * w_nodes[None, :]).astype(
+                dtype, copy=False)
+            cache['B'] = B
+
+        else:
+            B = cache['B']
 
-        #
-
+        # Principal branch. 2D matrix for all k
+        m_k_all = B @ ker
 
-
-
+        # Compute m on secondary branch from the principal branch, which is
+        # m_k = m_k + 2 \pi i rho_k(z), and rho(z) is the analytic extension of
+        # rho_k(x) using the k-th basis. Basically, rho_k(z) is w * P_k(z).
 
-
+        # Lower-half-plane jump for ALL k at once (vectorized)
+        mask_m = (z.imag <= 0)
+        if numpy.any(mask_m):
+            idx = numpy.flatnonzero(mask_m.ravel())
+            u_m = u_flat[idx].astype(dtype, copy=False)  # complex
+
+            # Scipy's eval_jacobi tops out at complex128 type. If u_m is
+            # complex256, downcast to complex128.
+            if u_m.dtype.itemsize > numpy.dtype(numpy.complex128).itemsize:
+                u_m_eval = u_m.astype(numpy.complex128, copy=False)
+                down_cast = True
+            else:
+                u_m_eval = u_m
+                down_cast = False
+
+            # P_k at complex u_m (all means for all k = 1,...,K)
+            P_all_m = numpy.empty((psi.size, u_m.size), dtype=dtype)
+            for k in range(psi.size):
+                P_all_m[k, :] = eval_jacobi(k, alpha, beta, u_m_eval)
+
+            # Jacobi weight. Must match jacobi_density's branch
+            w_m = numpy.power(1.0 - u_m, alpha) * numpy.power(1.0 + u_m, beta)
+
+            # rho_k(z) in x-units is (2/span) * w(u) * P_k(u)
+            rho_all = ((2.0 / span) * w_m[None, :] * P_all_m).astype(
+                dtype, copy=False)
+
+            if down_cast:
+                rho_all = rho_all.astype(dtype)
+
+            # compute analytic extension of rho(z) to lower-half plane for when
+            # rho is just the k-th Jacobi basis: w(z) P_k(z). For this, we
+            m_k_all[:, idx] = m_k_all[:, idx] + (2.0 * numpy.pi * 1j) * rho_all
+
+        # Partial sums S_k = sum_{j<=k} psi_j * m_j
+        WQ = (psi[:, None].astype(dtype, copy=False) * m_k_all)
+        m_partial = numpy.cumsum(WQ, axis=0)
 
     if continuation == 'wynn-eps':
         S = wynn_epsilon(m_partial)
@@ -266,7 +351,9 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
         S = weniger_delta(m_partial)
     elif continuation == 'brezinski':
         S = brezinski_theta(m_partial)
+    else:
+        # No acceleration (likely diverges in the lower-half plane)
+        S = m_partial[-1, :]
 
     m_total = S.reshape(z.shape)
-
-    return m_total
+    return m_total
freealg/_pade.py
CHANGED
@@ -27,8 +27,8 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
     """
     Generate q real poles outside [lam_m, lam_p].
 
-
-
+      * even q : q/2 on each side (Chebyshev-like layout)
+      * odd  q : (q+1)/2 on the *left*, (q-1)/2 on the right
     so q=1 => single pole on whichever side `odd_side` says.
 
     safety >= 1: 1, then poles start half an interval away; >1 pushes them
@@ -73,13 +73,13 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
 
 def _encode_poles(a, lam_m, lam_p):
     """
-    Map real pole a_j
+    Map real pole a_j => unconstrained s_j,
     so that the default left-of-interval pole stays left.
     """
 
     # half-width of the interval
     d = 0.5 * (lam_p - lam_m)
-    # if a < lam_m, we want s
+    # if a < lam_m, we want s >= 0; if a > lam_p, s < 0
     return numpy.where(
         a < lam_m,
         numpy.log((lam_m - a) / d),  # zero at a = lam_m - d
@@ -93,13 +93,13 @@ def _encode_poles(a, lam_m, lam_p):
 
 def _decode_poles(s, lam_m, lam_p):
     """
-    Inverse map s_j
+    Inverse map s_j => real pole a_j outside the interval.
     """
 
     d = 0.5 * (lam_p - lam_m)
     return numpy.where(
         s >= 0,
-        lam_m - d * numpy.exp(s),  # maps s=0 to a=lam_m
+        lam_m - d * numpy.exp(s),   # maps s=0 to a=lam_m-d (left)
         lam_p + d * numpy.exp(-s)   # maps s=0 to a=lam_p+d (right)
     )
 
@@ -186,7 +186,7 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
     else:
         skip = 0  # all entries are residues
 
-    # add
+    # add lambda only for the residue positions
     n = ATA.shape[0]
     for i in range(skip, n):
         ATA[i, i] += pade_reg
@@ -343,7 +343,7 @@ def eval_pade(z, pade_sol):
     """
 
     # z_arr = numpy.asanyarray(z)  # shape=(M,N)
-    # flat = z_arr.ravel()  # shape=(M
+    # flat = z_arr.ravel()  # shape=(M*N,)
     # c, D = pade_sol['c'], pade_sol['D']
     # poles = pade_sol['poles']
     # resid = pade_sol['resid']
@@ -362,7 +362,7 @@ def eval_pade(z, pade_sol):
 
     out = c + D*z
     for bj, rj in zip(poles, resid):
-        out += rj/(z - bj)  # each is an (N,) op, no
+        out += rj/(z - bj)  # each is an (N,) op, no N*q temp
     return out
 
 
@@ -384,7 +384,7 @@ def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
     b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
 
     Approach:
-    - Brute
+    - Brute-force all 2^q left/right assignments for denominator roots
     - Global search with differential_evolution, fallback to zeros if needed
     - Local refinement with least_squares
 
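A minimal sketch of the pole parameterization that `_encode_poles`/`_decode_poles` describe: a real pole outside [lam_m, lam_p] maps to an unconstrained s, so an optimizer can move poles freely without ever placing them inside the support. The decode formulas follow the comments in this diff; the encode branch for poles right of lam_p is reconstructed here as the algebraic inverse of the decode map and is an assumption.

```python
import numpy

def decode_poles(s, lam_m, lam_p):
    d = 0.5 * (lam_p - lam_m)                        # half-width of the interval
    s = numpy.asarray(s, dtype=float)
    return numpy.where(s >= 0,
                       lam_m - d * numpy.exp(s),     # s >= 0 -> left of lam_m
                       lam_p + d * numpy.exp(-s))    # s <  0 -> right of lam_p

def encode_poles(a, lam_m, lam_p):
    d = 0.5 * (lam_p - lam_m)
    a = numpy.asarray(a, dtype=float)
    # numpy.where evaluates both branches; silence the log-of-negative warning
    with numpy.errstate(invalid='ignore'):
        return numpy.where(a < lam_m,
                           numpy.log((lam_m - a) / d),    # zero at a = lam_m - d
                           -numpy.log((a - lam_p) / d))   # zero at a = lam_p + d

# Round trip: s = 0 places the pole half an interval left of lam_m.
s = numpy.array([0.0, 1.5, -2.0])
a = decode_poles(s, lam_m=-1.0, lam_p=1.0)
assert numpy.allclose(encode_poles(a, -1.0, 1.0), s)
```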