freealg 0.5.3__tar.gz → 0.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {freealg-0.5.3 → freealg-0.6.0}/PKG-INFO +1 -1
- {freealg-0.5.3 → freealg-0.6.0}/freealg/__init__.py +4 -1
- freealg-0.6.0/freealg/__version__.py +1 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_chebyshev.py +24 -19
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_decompress.py +20 -15
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_jacobi.py +21 -6
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_linalg.py +1 -1
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_pade.py +10 -10
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_sample.py +42 -18
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_series.py +9 -9
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_support.py +35 -22
- freealg-0.6.0/freealg/_util.py +256 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg/distributions/_kesten_mckay.py +1 -1
- {freealg-0.5.3 → freealg-0.6.0}/freealg/distributions/_marchenko_pastur.py +1 -1
- {freealg-0.5.3 → freealg-0.6.0}/freealg/distributions/_meixner.py +2 -2
- {freealg-0.5.3 → freealg-0.6.0}/freealg/distributions/_wachter.py +1 -1
- {freealg-0.5.3 → freealg-0.6.0}/freealg/freeform.py +50 -48
- {freealg-0.5.3 → freealg-0.6.0}/freealg.egg-info/PKG-INFO +1 -1
- freealg-0.5.3/freealg/__version__.py +0 -1
- freealg-0.5.3/freealg/_util.py +0 -153
- {freealg-0.5.3 → freealg-0.6.0}/AUTHORS.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/CHANGELOG.rst +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/LICENSE.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/MANIFEST.in +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/README.rst +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_damp.py +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg/_plot_util.py +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg/distributions/__init__.py +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg/distributions/_wigner.py +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg.egg-info/SOURCES.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg.egg-info/dependency_links.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg.egg-info/not-zip-safe +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg.egg-info/requires.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/freealg.egg-info/top_level.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/pyproject.toml +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/requirements.txt +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/setup.cfg +0 -0
- {freealg-0.5.3 → freealg-0.6.0}/setup.py +0 -0
|
@@ -8,9 +8,12 @@
|
|
|
8
8
|
|
|
9
9
|
from .freeform import FreeForm
|
|
10
10
|
from ._linalg import eigvalsh, cond, norm, trace, slogdet
|
|
11
|
+
from ._support import supp
|
|
12
|
+
from ._sample import sample
|
|
13
|
+
from ._util import kde
|
|
11
14
|
from . import distributions
|
|
12
15
|
|
|
13
16
|
__all__ = ['FreeForm', 'distributions', 'eigvalsh', 'cond', 'norm', 'trace',
|
|
14
|
-
'slogdet']
|
|
17
|
+
'slogdet', 'supp', 'sample', 'kde']
|
|
15
18
|
|
|
16
19
|
from .__version__ import __version__ # noqa: F401 E402
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.6.0"
|
|
@@ -43,10 +43,10 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
|
|
|
43
43
|
The assumed compact support of rho.
|
|
44
44
|
|
|
45
45
|
K : int
|
|
46
|
-
Highest Chebyshev
|
|
46
|
+
Highest Chebyshev-II order.
|
|
47
47
|
|
|
48
48
|
reg : float
|
|
49
|
-
Tikhonov
|
|
49
|
+
Tikhonov-style ridge on each coefficient (defaults to 0).
|
|
50
50
|
|
|
51
51
|
Returns
|
|
52
52
|
-------
|
|
@@ -57,10 +57,10 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
|
|
|
57
57
|
|
|
58
58
|
lam_m, lam_p = support
|
|
59
59
|
|
|
60
|
-
# Map to [
|
|
60
|
+
# Map to [-1,1] interval
|
|
61
61
|
t = (2 * eig - (lam_m + lam_p)) / (lam_p - lam_m)
|
|
62
62
|
|
|
63
|
-
# Inner
|
|
63
|
+
# Inner-product norm of each U_k under w(t) = sqrt{1-t^2} is \\pi/2
|
|
64
64
|
norm = numpy.pi / 2
|
|
65
65
|
|
|
66
66
|
psi = numpy.empty(K+1)
|
|
@@ -92,12 +92,12 @@ def chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0):
|
|
|
92
92
|
Projection of a *continuous* density given on a grid (xs, pdf)
|
|
93
93
|
onto the Chebyshev-II basis.
|
|
94
94
|
|
|
95
|
-
xs : 1-D numpy array (original x
|
|
95
|
+
xs : 1-D numpy array (original x-axis, not the t-variable)
|
|
96
96
|
pdf : same shape as xs, integrates to 1 on xs
|
|
97
97
|
"""
|
|
98
98
|
|
|
99
99
|
lam_m, lam_p = support
|
|
100
|
-
t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m) # map to [
|
|
100
|
+
t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m) # map to [-1,1]
|
|
101
101
|
|
|
102
102
|
norm = numpy.pi / 2.0
|
|
103
103
|
psi = numpy.empty(K + 1)
|
|
@@ -140,15 +140,15 @@ def chebyshev_density(x, psi, support):
|
|
|
140
140
|
-------
|
|
141
141
|
|
|
142
142
|
rho_x : ndarray, same shape as x
|
|
143
|
-
Approximated spectral density on the original x
|
|
143
|
+
Approximated spectral density on the original x-axis.
|
|
144
144
|
"""
|
|
145
145
|
|
|
146
146
|
lam_m, lam_p = support
|
|
147
147
|
|
|
148
|
-
# Map to [
|
|
148
|
+
# Map to [-1,1] interval
|
|
149
149
|
t = (2 * numpy.asarray(x) - (lam_m + lam_p)) / (lam_p - lam_m)
|
|
150
150
|
|
|
151
|
-
# Weight sqrt{1
|
|
151
|
+
# Weight sqrt{1-t^2} (clip for numerical safety)
|
|
152
152
|
w = numpy.sqrt(numpy.clip(1 - t**2, a_min=0, a_max=None))
|
|
153
153
|
|
|
154
154
|
# Summation approximation
|
|
@@ -165,22 +165,23 @@ def chebyshev_density(x, psi, support):
|
|
|
165
165
|
# chebyshev stieltjes
|
|
166
166
|
# ===================
|
|
167
167
|
|
|
168
|
-
def chebyshev_stieltjes(z, psi, support, continuation='pade'
|
|
168
|
+
def chebyshev_stieltjes(z, psi, support, continuation='pade',
|
|
169
|
+
dtype=numpy.complex128):
|
|
169
170
|
"""
|
|
170
|
-
Compute the Stieltjes transform m(z) for a Chebyshev
|
|
171
|
+
Compute the Stieltjes transform m(z) for a Chebyshev-II expansion
|
|
171
172
|
|
|
172
|
-
rho(x) = (2/(lam_p - lam_m)) * sqrt(1
|
|
173
|
+
rho(x) = (2/(lam_p - lam_m)) * sqrt(1-t(x)^2) * sum_{k=0}^K psi_k U_k(t(x))
|
|
173
174
|
|
|
174
|
-
via the closed
|
|
175
|
+
via the closed-form
|
|
175
176
|
|
|
176
|
-
\\int_{-1}^1 U_k(t) sqrt(1
|
|
177
|
+
\\int_{-1}^1 U_k(t) sqrt(1-t^2)/(u - t) dt = \\pi J(u)^(k+1),
|
|
177
178
|
|
|
178
179
|
where
|
|
179
180
|
|
|
180
|
-
u = (2(z
|
|
181
|
+
u = (2(z-center))/span,
|
|
181
182
|
center = (lam_p + lam_m)/2,
|
|
182
183
|
span = lam_p - lam_m,
|
|
183
|
-
J(u) = u
|
|
184
|
+
J(u) = u - sqrt(u^2-1)
|
|
184
185
|
|
|
185
186
|
and then
|
|
186
187
|
|
|
@@ -193,7 +194,7 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
|
|
|
193
194
|
Points in the complex plane.
|
|
194
195
|
|
|
195
196
|
psi : array_like, shape (K+1,)
|
|
196
|
-
Chebyshev
|
|
197
|
+
Chebyshev-II coefficients \\psi.
|
|
197
198
|
|
|
198
199
|
support : tuple
|
|
199
200
|
The support interval of the original density.
|
|
@@ -201,6 +202,9 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
|
|
|
201
202
|
continuation : str, default= ``'pade'``
|
|
202
203
|
Method of analytic continuation.
|
|
203
204
|
|
|
205
|
+
dtype : numpy.type, default=numpy.complex128
|
|
206
|
+
Data type for complex arrays. This might enhance series acceleration.
|
|
207
|
+
|
|
204
208
|
Returns
|
|
205
209
|
-------
|
|
206
210
|
|
|
@@ -208,7 +212,8 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
|
|
|
208
212
|
The Stieltjes transform m(z) on the same shape as z.
|
|
209
213
|
"""
|
|
210
214
|
|
|
211
|
-
z = numpy.asarray(z, dtype=
|
|
215
|
+
z = numpy.asarray(z, dtype=dtype)
|
|
216
|
+
|
|
212
217
|
lam_m, lam_p = support
|
|
213
218
|
span = lam_p - lam_m
|
|
214
219
|
center = 0.5 * (lam_m + lam_p)
|
|
@@ -236,7 +241,7 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
|
|
|
236
241
|
|
|
237
242
|
else:
|
|
238
243
|
# Flatten J before passing to Wynn method.
|
|
239
|
-
psi_zero = numpy.concatenate([[0], psi])
|
|
244
|
+
psi_zero = numpy.concatenate([[0.0], psi])
|
|
240
245
|
Sn = partial_sum(psi_zero, J.ravel(), p=0)
|
|
241
246
|
|
|
242
247
|
if continuation == 'wynn-eps':
|
|
@@ -567,7 +567,8 @@ def _newton_method(f, z_init, a, support, enforce_wall=False, tol=1e-4,
|
|
|
567
567
|
|
|
568
568
|
def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
569
569
|
alpha=0.5, max_bt=1, eps=1e-30, step_factor=5.0,
|
|
570
|
-
post_smooth=True, jump_tol=10.0,
|
|
570
|
+
post_smooth=True, jump_tol=10.0, dtype=numpy.complex128,
|
|
571
|
+
verbose=False):
|
|
571
572
|
"""
|
|
572
573
|
Solves :math:``f(z) = a`` for many starting points simultaneously using the
|
|
573
574
|
secant method in the complex plane.
|
|
@@ -581,7 +582,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
581
582
|
Two initial guesses. ``z1`` may be broadcast to ``z0``.
|
|
582
583
|
|
|
583
584
|
a : complex or array_like, optional
|
|
584
|
-
Right
|
|
585
|
+
Right-hand-side targets (broadcasted to ``z0``). Defaults to ``0+0j``.
|
|
585
586
|
|
|
586
587
|
tol : float, optional
|
|
587
588
|
Convergence criterion on ``|f(z) - a|``. Defaults to ``1e-12``.
|
|
@@ -590,10 +591,10 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
590
591
|
Maximum number of secant iterations. Defaults to ``100``.
|
|
591
592
|
|
|
592
593
|
alpha : float, optional
|
|
593
|
-
Back
|
|
594
|
+
Back-tracking shrink factor (``0 < alpha < 1``). Defaults to ``0.5``.
|
|
594
595
|
|
|
595
596
|
max_bt : int, optional
|
|
596
|
-
Maximum back
|
|
597
|
+
Maximum back-tracking trials per iteration. Defaults to ``0``.
|
|
597
598
|
|
|
598
599
|
eps : float, optional
|
|
599
600
|
Safeguard added to tiny denominators. Defaults to ``1e-30``.
|
|
@@ -607,6 +608,9 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
607
608
|
Sensitivity of the clean-up pass; larger tolerance implies fewer
|
|
608
609
|
re-solves.
|
|
609
610
|
|
|
611
|
+
dtype : {``'complex128'``, ``'complex256'``}, default = ``'complex128'``
|
|
612
|
+
Data type for inner computations of complex variables.
|
|
613
|
+
|
|
610
614
|
verbose : bool, optional
|
|
611
615
|
If *True*, prints progress every 10 iterations.
|
|
612
616
|
|
|
@@ -622,9 +626,9 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
622
626
|
|
|
623
627
|
# Broadcast inputs
|
|
624
628
|
z0, z1, a = numpy.broadcast_arrays(
|
|
625
|
-
numpy.asarray(z0,
|
|
626
|
-
numpy.asarray(z1,
|
|
627
|
-
numpy.asarray(a,
|
|
629
|
+
numpy.asarray(z0, dtype=dtype),
|
|
630
|
+
numpy.asarray(z1, dtype=dtype),
|
|
631
|
+
numpy.asarray(a, dtype=dtype),
|
|
628
632
|
)
|
|
629
633
|
orig_shape = z0.shape
|
|
630
634
|
z0, z1, a = (x.ravel() for x in (z0, z1, a))
|
|
@@ -677,7 +681,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
677
681
|
if not worse.any():
|
|
678
682
|
break
|
|
679
683
|
|
|
680
|
-
# Book
|
|
684
|
+
# Book-keeping
|
|
681
685
|
newly_conv = (numpy.abs(f2) < tol) & active
|
|
682
686
|
converged[newly_conv] = True
|
|
683
687
|
iterations[newly_conv] = k + 1
|
|
@@ -691,7 +695,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
691
695
|
if verbose and k % 10 == 0:
|
|
692
696
|
print(f"Iter {k}: {converged.sum()} / {n_points} converged")
|
|
693
697
|
|
|
694
|
-
# Non
|
|
698
|
+
# Non-converged points
|
|
695
699
|
remaining = ~converged
|
|
696
700
|
roots[remaining] = z1[remaining]
|
|
697
701
|
residuals[remaining] = numpy.abs(f1[remaining])
|
|
@@ -723,7 +727,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
|
|
|
723
727
|
new_root, new_res, new_iter = _secant_complex(
|
|
724
728
|
f, z_first, z_second, a[bad], tol=tol, max_iter=max_iter,
|
|
725
729
|
alpha=alpha, max_bt=max_bt, eps=eps, step_factor=step_factor,
|
|
726
|
-
post_smooth=False, # avoid recursion
|
|
730
|
+
dtype=dtype, post_smooth=False, # avoid recursion
|
|
727
731
|
)
|
|
728
732
|
|
|
729
733
|
roots[bad] = new_root
|
|
@@ -894,7 +898,7 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
|
|
|
894
898
|
# Initialize roots below the real axis
|
|
895
899
|
if roots_init is None:
|
|
896
900
|
roots_init = numpy.full(x.shape, numpy.mean(freeform.support) - 0.1j,
|
|
897
|
-
dtype=
|
|
901
|
+
dtype=freeform.dtype)
|
|
898
902
|
|
|
899
903
|
# Finding roots
|
|
900
904
|
if method == 'newton':
|
|
@@ -925,11 +929,11 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
|
|
|
925
929
|
|
|
926
930
|
elif method == 'secant':
|
|
927
931
|
z0 = numpy.full(x.shape, numpy.mean(freeform.support) + 0.1j,
|
|
928
|
-
dtype=
|
|
932
|
+
dtype=freeform.dtype)
|
|
929
933
|
z1 = z0 - 0.2j
|
|
930
934
|
|
|
931
935
|
roots, _, _ = _secant_complex(_char_z, z0, z1, a=target, tol=tolerance,
|
|
932
|
-
max_iter=max_iter)
|
|
936
|
+
max_iter=max_iter, dtype=freeform.dtype)
|
|
933
937
|
else:
|
|
934
938
|
raise NotImplementedError('"method" is invalid.')
|
|
935
939
|
|
|
@@ -960,7 +964,8 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
|
|
|
960
964
|
# =======================
|
|
961
965
|
|
|
962
966
|
def reverse_characteristics(freeform, z_inits, T, iterations=500,
|
|
963
|
-
step_size=0.1, tolerance=1e-8
|
|
967
|
+
step_size=0.1, tolerance=1e-8,
|
|
968
|
+
dtype=numpy.complex128):
|
|
964
969
|
"""
|
|
965
970
|
"""
|
|
966
971
|
|
|
@@ -975,7 +980,7 @@ def reverse_characteristics(freeform, z_inits, T, iterations=500,
|
|
|
975
980
|
target_z, target_t = numpy.meshgrid(z_inits, t_eval)
|
|
976
981
|
|
|
977
982
|
z = numpy.full(target_z.shape, numpy.mean(freeform.support) - 0.1j,
|
|
978
|
-
dtype=
|
|
983
|
+
dtype=dtype)
|
|
979
984
|
|
|
980
985
|
# Broken Newton steps can produce a lot of warnings. Removing them for now.
|
|
981
986
|
with numpy.errstate(all='ignore'):
|
|
@@ -144,11 +144,23 @@ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
|
|
|
144
144
|
lam_m, lam_p = support
|
|
145
145
|
t = (2 * x - (lam_p + lam_m)) / (lam_p - lam_m)
|
|
146
146
|
w = (1 - t)**alpha * (1 + t)**beta
|
|
147
|
+
|
|
148
|
+
# The function eval_jacobi does not accept complex256 type
|
|
149
|
+
down_cast = False
|
|
150
|
+
if numpy.issubdtype(t.dtype, numpy.complexfloating) and \
|
|
151
|
+
t.itemsize > numpy.dtype(numpy.complex128).itemsize:
|
|
152
|
+
t = t.astype(numpy.complex128)
|
|
153
|
+
down_cast = True
|
|
154
|
+
|
|
147
155
|
P = numpy.vstack([eval_jacobi(k, alpha, beta, t) for k in range(len(psi))])
|
|
148
156
|
|
|
149
157
|
rho_t = w * (psi @ P) # density in t-variable
|
|
150
158
|
rho_x = rho_t * (2.0 / (lam_p - lam_m)) # back to x-variable
|
|
151
159
|
|
|
160
|
+
# Cast up to complex256
|
|
161
|
+
if down_cast:
|
|
162
|
+
rho_x = rho_x.astype(t.dtype)
|
|
163
|
+
|
|
152
164
|
return rho_x
|
|
153
165
|
|
|
154
166
|
|
|
@@ -157,13 +169,13 @@ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
|
|
|
157
169
|
# ================
|
|
158
170
|
|
|
159
171
|
def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
|
|
160
|
-
continuation='pade'):
|
|
172
|
+
continuation='pade', dtype=numpy.complex128):
|
|
161
173
|
"""
|
|
162
174
|
Compute m(z) = sum_k psi_k * m_k(z) where
|
|
163
175
|
|
|
164
176
|
m_k(z) = \\int w^{(alpha, beta)}(t) P_k^{(alpha, beta)}(t) / (u(z)-t) dt
|
|
165
177
|
|
|
166
|
-
Each m_k is evaluated *separately* with a Gauss
|
|
178
|
+
Each m_k is evaluated *separately* with a Gauss-Jacobi rule sized
|
|
167
179
|
for that k. This follows the user's request: 1 quadrature rule per P_k.
|
|
168
180
|
|
|
169
181
|
Parameters
|
|
@@ -184,6 +196,9 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
|
|
|
184
196
|
continuation : str, default= ``'pade'``
|
|
185
197
|
Method of analytic continuation.
|
|
186
198
|
|
|
199
|
+
dtype : numpy.type, default=numpy.complex128
|
|
200
|
+
Data type for complex arrays. This might enhance series acceleration.
|
|
201
|
+
|
|
187
202
|
Returns
|
|
188
203
|
-------
|
|
189
204
|
|
|
@@ -194,7 +209,7 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
|
|
|
194
209
|
Same shape as z
|
|
195
210
|
"""
|
|
196
211
|
|
|
197
|
-
z = numpy.asarray(z, dtype=
|
|
212
|
+
z = numpy.asarray(z, dtype=dtype)
|
|
198
213
|
lam_minus, lam_plus = support
|
|
199
214
|
span = lam_plus - lam_minus
|
|
200
215
|
centre = 0.5 * (lam_plus + lam_minus)
|
|
@@ -202,11 +217,11 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
|
|
|
202
217
|
# Map z -> u in the standard [-1,1] domain
|
|
203
218
|
u = (2.0 / span) * (z - centre)
|
|
204
219
|
|
|
205
|
-
m_total = numpy.zeros_like(z, dtype=
|
|
220
|
+
m_total = numpy.zeros_like(z, dtype=dtype)
|
|
206
221
|
|
|
207
222
|
if continuation != 'pade':
|
|
208
223
|
# Stores m with the ravel size of z.
|
|
209
|
-
m_partial = numpy.zeros((psi.size, z.size), dtype=
|
|
224
|
+
m_partial = numpy.zeros((psi.size, z.size), dtype=dtype)
|
|
210
225
|
|
|
211
226
|
for k, psi_k in enumerate(psi):
|
|
212
227
|
# Select quadrature size tailored to this P_k
|
|
@@ -221,7 +236,7 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
|
|
|
221
236
|
|
|
222
237
|
# Evaluate Jacobi polynomials of the second kind, Q_k, using quadrature
|
|
223
238
|
diff = t_nodes[:, None, None] - u[None, ...] # (n_quad, Ny, Nx)
|
|
224
|
-
Q_k = (integrand[:, None, None] / diff).sum(axis=0)
|
|
239
|
+
Q_k = (integrand[:, None, None] / diff).sum(axis=0).astype(dtype)
|
|
225
240
|
|
|
226
241
|
# Principal branch
|
|
227
242
|
m_k = (2.0 / span) * Q_k
|
|
@@ -151,7 +151,7 @@ def eigvalsh(A, size=None, psd=None, seed=None, plot=False, **kwargs):
|
|
|
151
151
|
# Perform fit and estimate eigenvalues
|
|
152
152
|
order = 1 + int(len(samples)**0.2)
|
|
153
153
|
ff.fit(method='chebyshev', K=order, projection='sample',
|
|
154
|
-
continuation='wynn', force=True, plot=False, latex=False,
|
|
154
|
+
continuation='wynn-eps', force=True, plot=False, latex=False,
|
|
155
155
|
save=False)
|
|
156
156
|
|
|
157
157
|
if plot:
|
|
@@ -27,8 +27,8 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
|
|
|
27
27
|
"""
|
|
28
28
|
Generate q real poles outside [lam_m, lam_p].
|
|
29
29
|
|
|
30
|
-
|
|
31
|
-
|
|
30
|
+
* even q : q/2 on each side (Chebyshev-like layout)
|
|
31
|
+
* odd q : (q+1)/2 on the *left*, (q-1)/2 on the right
|
|
32
32
|
so q=1 => single pole on whichever side `odd_side` says.
|
|
33
33
|
|
|
34
34
|
safety >= 1: at 1, poles start half an interval away; >1 pushes them
|
|
@@ -73,13 +73,13 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
|
|
|
73
73
|
|
|
74
74
|
def _encode_poles(a, lam_m, lam_p):
|
|
75
75
|
"""
|
|
76
|
-
Map real pole a_j
|
|
76
|
+
Map real pole a_j => unconstrained s_j,
|
|
77
77
|
so that the default left-of-interval pole stays left.
|
|
78
78
|
"""
|
|
79
79
|
|
|
80
80
|
# half-width of the interval
|
|
81
81
|
d = 0.5 * (lam_p - lam_m)
|
|
82
|
-
# if a < lam_m, we want s
|
|
82
|
+
# if a < lam_m, we want s >= 0; if a > lam_p, s < 0
|
|
83
83
|
return numpy.where(
|
|
84
84
|
a < lam_m,
|
|
85
85
|
numpy.log((lam_m - a) / d), # zero at a = lam_m - d
|
|
@@ -93,13 +93,13 @@ def _encode_poles(a, lam_m, lam_p):
|
|
|
93
93
|
|
|
94
94
|
def _decode_poles(s, lam_m, lam_p):
|
|
95
95
|
"""
|
|
96
|
-
Inverse map s_j
|
|
96
|
+
Inverse map s_j => real pole a_j outside the interval.
|
|
97
97
|
"""
|
|
98
98
|
|
|
99
99
|
d = 0.5 * (lam_p - lam_m)
|
|
100
100
|
return numpy.where(
|
|
101
101
|
s >= 0,
|
|
102
|
-
lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m
|
|
102
|
+
lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m-d (left)
|
|
103
103
|
lam_p + d * numpy.exp(-s) # maps s=0 to a=lam_p+d (right)
|
|
104
104
|
)
|
|
105
105
|
|
|
@@ -186,7 +186,7 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
|
|
|
186
186
|
else:
|
|
187
187
|
skip = 0 # all entries are residues
|
|
188
188
|
|
|
189
|
-
# add
|
|
189
|
+
# add lambda only for the residue positions
|
|
190
190
|
n = ATA.shape[0]
|
|
191
191
|
for i in range(skip, n):
|
|
192
192
|
ATA[i, i] += pade_reg
|
|
@@ -343,7 +343,7 @@ def eval_pade(z, pade_sol):
|
|
|
343
343
|
"""
|
|
344
344
|
|
|
345
345
|
# z_arr = numpy.asanyarray(z) # shape=(M,N)
|
|
346
|
-
# flat = z_arr.ravel() # shape=(M
|
|
346
|
+
# flat = z_arr.ravel() # shape=(M*N,)
|
|
347
347
|
# c, D = pade_sol['c'], pade_sol['D']
|
|
348
348
|
# poles = pade_sol['poles']
|
|
349
349
|
# resid = pade_sol['resid']
|
|
@@ -362,7 +362,7 @@ def eval_pade(z, pade_sol):
|
|
|
362
362
|
|
|
363
363
|
out = c + D*z
|
|
364
364
|
for bj, rj in zip(poles, resid):
|
|
365
|
-
out += rj/(z - bj) # each is an (N,) op, no
|
|
365
|
+
out += rj/(z - bj) # each is an (N,) op, no N*q temp
|
|
366
366
|
return out
|
|
367
367
|
|
|
368
368
|
|
|
@@ -384,7 +384,7 @@ def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
|
|
|
384
384
|
b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
|
|
385
385
|
|
|
386
386
|
Approach:
|
|
387
|
-
- Brute
|
|
387
|
+
- Brute-force all 2^q left/right assignments for denominator roots
|
|
388
388
|
- Global search with differential_evolution, fallback to zeros if needed
|
|
389
389
|
- Local refinement with least_squares
|
|
390
390
|
|
|
@@ -15,7 +15,7 @@ from scipy.integrate import cumulative_trapezoid
|
|
|
15
15
|
from scipy.interpolate import PchipInterpolator
|
|
16
16
|
from scipy.stats import qmc
|
|
17
17
|
|
|
18
|
-
__all__ = ['
|
|
18
|
+
__all__ = ['sample']
|
|
19
19
|
|
|
20
20
|
|
|
21
21
|
# =============
|
|
@@ -32,60 +32,75 @@ def _quantile_func(x, rho, clamp=1e-4, eps=1e-8):
|
|
|
32
32
|
rho_clamp[rho < clamp] = eps
|
|
33
33
|
cdf = cumulative_trapezoid(rho_clamp, x, initial=0)
|
|
34
34
|
cdf /= cdf[-1]
|
|
35
|
+
cdf_inv = PchipInterpolator(cdf, x, extrapolate=False)
|
|
35
36
|
|
|
36
|
-
return
|
|
37
|
+
return cdf_inv
|
|
37
38
|
|
|
38
39
|
|
|
39
|
-
#
|
|
40
|
-
#
|
|
41
|
-
#
|
|
40
|
+
# ======
|
|
41
|
+
# sample
|
|
42
|
+
# ======
|
|
42
43
|
|
|
43
|
-
def
|
|
44
|
+
def sample(x, rho, num_pts, method='qmc', seed=None):
|
|
44
45
|
"""
|
|
45
|
-
Low-discrepancy sampling from
|
|
46
|
-
Quasi-Monte Carlo.
|
|
46
|
+
Low-discrepancy sampling from density estimate.
|
|
47
47
|
|
|
48
48
|
Parameters
|
|
49
49
|
----------
|
|
50
50
|
|
|
51
|
-
x : numpy.array
|
|
52
|
-
Sorted abscissae at which the density has been evaluated.
|
|
51
|
+
x : numpy.array
|
|
52
|
+
Sorted abscissae at which the density has been evaluated. Shape `(n,)`.
|
|
53
53
|
|
|
54
|
-
rho : numpy.array
|
|
54
|
+
rho : numpy.array
|
|
55
55
|
Density values corresponding to `x`. Must be non-negative and define
|
|
56
56
|
a valid probability density (i.e., integrate to 1 over the support).
|
|
57
|
+
Shape `(n,)`.
|
|
57
58
|
|
|
58
59
|
num_pts : int
|
|
59
60
|
Number of sample points to generate from the density estimate.
|
|
60
61
|
|
|
62
|
+
method : {``'mc'``, ``'qmc'``}, default= ``'qmc'``
|
|
63
|
+
Method of drawing samples from uniform distribution:
|
|
64
|
+
|
|
65
|
+
* ``'mc'``: Monte Carlo
|
|
66
|
+
* ``'qmc'``: Quasi Monte Carlo
|
|
67
|
+
|
|
61
68
|
seed : int, default=None
|
|
62
69
|
Seed for random number generator
|
|
63
70
|
|
|
64
71
|
Returns
|
|
65
72
|
-------
|
|
73
|
+
|
|
66
74
|
samples : numpy.array, shape (num_pts,)
|
|
67
75
|
Samples drawn from the estimated density using a one-dimensional Halton
|
|
68
76
|
sequence mapped through the estimated quantile function.
|
|
69
77
|
|
|
70
78
|
See Also
|
|
71
79
|
--------
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
80
|
+
|
|
81
|
+
freealg.supp
|
|
82
|
+
freealg.kde
|
|
83
|
+
|
|
84
|
+
Notes
|
|
85
|
+
-----
|
|
86
|
+
|
|
87
|
+
The underlying Quasi-Monte Carlo engine uses ``scipy.stats.qmc.Halton``
|
|
88
|
+
function for generating low-discrepancy points.
|
|
75
89
|
|
|
76
90
|
Examples
|
|
77
91
|
--------
|
|
78
92
|
|
|
79
93
|
.. code-block:: python
|
|
94
|
+
:emphasize-lines: 8
|
|
80
95
|
|
|
81
96
|
>>> import numpy
|
|
82
|
-
>>> from freealg import
|
|
97
|
+
>>> from freealg import sample
|
|
83
98
|
|
|
84
99
|
>>> # density of Beta(3,1) on [0,1]
|
|
85
100
|
>>> x = numpy.linspace(0, 1, 200)
|
|
86
101
|
>>> rho = 3 * x**2
|
|
87
102
|
|
|
88
|
-
>>> samples =
|
|
103
|
+
>>> samples = sample(x, rho, num_pts=1000, method='qmc')
|
|
89
104
|
>>> assert samples.shape == (1000,)
|
|
90
105
|
|
|
91
106
|
>>> # Empirical mean should be close to 3/4
|
|
@@ -94,8 +109,17 @@ def qmc_sample(x, rho, num_pts, seed=None):
|
|
|
94
109
|
|
|
95
110
|
rng = numpy.random.default_rng(seed)
|
|
96
111
|
quantile = _quantile_func(x, rho)
|
|
97
|
-
|
|
98
|
-
|
|
112
|
+
|
|
113
|
+
# Draw from uniform distribution
|
|
114
|
+
if method == 'mc':
|
|
115
|
+
u = rng.random(num_pts)
|
|
116
|
+
elif method == 'qmc':
|
|
117
|
+
engine = qmc.Halton(d=1, rng=rng)
|
|
118
|
+
u = engine.random(num_pts)
|
|
119
|
+
else:
|
|
120
|
+
raise NotImplementedError('"method" is invalid.')
|
|
121
|
+
|
|
122
|
+
# Draw from distribution by mapping from inverse CDF
|
|
99
123
|
samples = quantile(u)
|
|
100
124
|
|
|
101
125
|
return samples.ravel()
|
|
@@ -183,13 +183,13 @@ def wynn_rho(Sn, beta=0.0):
|
|
|
183
183
|
-------
|
|
184
184
|
|
|
185
185
|
S : numpy.ndarray
|
|
186
|
-
A 1D array of shape ``(d,)`` giving the rho
|
|
186
|
+
A 1D array of shape ``(d,)`` giving the rho-accelerated estimate
|
|
187
187
|
of the series limit for each component.
|
|
188
188
|
|
|
189
189
|
Notes
|
|
190
190
|
-----
|
|
191
191
|
|
|
192
|
-
Let ``S_n`` be the *n
|
|
192
|
+
Let ``S_n`` be the *n*-th partial sum of the (possibly divergent)
|
|
193
193
|
sequence. Wynn's rho algorithm builds a triangular table
|
|
194
194
|
``rho[k, n]`` (row *k*, column *n*) as follows:
|
|
195
195
|
|
|
@@ -200,7 +200,7 @@ def wynn_rho(Sn, beta=0.0):
|
|
|
200
200
|
(n + beta + k - 1) / (rho[k-1, n+1] - rho[k-1, n])
|
|
201
201
|
|
|
202
202
|
Only even rows (k even) provide improved approximants. As with
|
|
203
|
-
``wynn_epsilon``, we apply the scalar recursion component
|
|
203
|
+
``wynn_epsilon``, we apply the scalar recursion component-wise so that a
|
|
204
204
|
slowly converging component does not stall the others.
|
|
205
205
|
"""
|
|
206
206
|
|
|
@@ -255,7 +255,7 @@ def wynn_rho(Sn, beta=0.0):
|
|
|
255
255
|
|
|
256
256
|
def levin_u(Sn, omega=None, beta=0.0):
|
|
257
257
|
"""
|
|
258
|
-
Levin u
|
|
258
|
+
Levin u-transform (vector form).
|
|
259
259
|
|
|
260
260
|
Parameters
|
|
261
261
|
----------
|
|
@@ -339,13 +339,13 @@ def weniger_delta(Sn):
|
|
|
339
339
|
-------
|
|
340
340
|
|
|
341
341
|
S : numpy.ndarray
|
|
342
|
-
Array of shape (d,) giving the
|
|
343
|
-
component.
|
|
342
|
+
Array of shape (d,) giving the delta2 accelerated limit estimate for
|
|
343
|
+
each component.
|
|
344
344
|
"""
|
|
345
345
|
|
|
346
346
|
N, d = Sn.shape
|
|
347
347
|
|
|
348
|
-
# Need at least three partial sums to form
|
|
348
|
+
# Need at least three partial sums to form delta2
|
|
349
349
|
if N < 3:
|
|
350
350
|
return Sn[-1, :].copy()
|
|
351
351
|
|
|
@@ -384,14 +384,14 @@ def brezinski_theta(Sn):
|
|
|
384
384
|
----------
|
|
385
385
|
|
|
386
386
|
Sn : numpy.ndarray
|
|
387
|
-
A 2
|
|
387
|
+
A 2-D array of the size ``(N, d)``, where `N` is the number of partial
|
|
388
388
|
sums and `d` is the vector size.
|
|
389
389
|
|
|
390
390
|
Returns
|
|
391
391
|
-------
|
|
392
392
|
|
|
393
393
|
S : numpy.ndarray
|
|
394
|
-
A 1
|
|
394
|
+
A 1-D array of the size ``(d,)``. The theta-accelerated estimate of
|
|
395
395
|
the series limit in each vector component.
|
|
396
396
|
"""
|
|
397
397
|
|