freealg 0.0.3__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {freealg-0.0.3/freealg.egg-info → freealg-0.1.1}/PKG-INFO +3 -1
- freealg-0.1.1/freealg/__version__.py +1 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/_chebyshev.py +39 -5
- {freealg-0.0.3 → freealg-0.1.1}/freealg/_jacobi.py +37 -5
- freealg-0.1.1/freealg/_pade.py +487 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/_plot_util.py +6 -28
- freealg-0.1.1/freealg/_sample.py +85 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/distributions/__init__.py +4 -4
- freealg-0.1.1/freealg/distributions/kesten_mckay.py +559 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/distributions/marchenko_pastur.py +4 -3
- freealg-0.1.1/freealg/distributions/wachter.py +568 -0
- freealg-0.1.1/freealg/distributions/wigner.py +552 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/freeform.py +122 -32
- {freealg-0.0.3 → freealg-0.1.1/freealg.egg-info}/PKG-INFO +3 -1
- {freealg-0.0.3 → freealg-0.1.1}/freealg.egg-info/SOURCES.txt +5 -1
- {freealg-0.0.3 → freealg-0.1.1}/freealg.egg-info/requires.txt +2 -0
- {freealg-0.0.3 → freealg-0.1.1}/requirements.txt +3 -1
- freealg-0.0.3/freealg/__version__.py +0 -1
- freealg-0.0.3/freealg/_pade.py +0 -139
- {freealg-0.0.3 → freealg-0.1.1}/CHANGELOG.rst +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/LICENSE.txt +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/MANIFEST.in +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/README.rst +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/__init__.py +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/_damp.py +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/_decompress.py +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg/_util.py +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg.egg-info/dependency_links.txt +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg.egg-info/not-zip-safe +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/freealg.egg-info/top_level.txt +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/pyproject.toml +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/setup.cfg +0 -0
- {freealg-0.0.3 → freealg-0.1.1}/setup.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: freealg
-Version: 0.0.3
+Version: 0.1.1
 Summary: Free probability for large matrices
 Keywords: leaderboard bot chat
 Platform: Linux
@@ -29,6 +29,8 @@ Requires-Dist: scipy
 Requires-Dist: texplot
 Requires-Dist: matplotlib
 Requires-Dist: colorcet
+Requires-Dist: networkx
+Requires-Dist: statsmodels
 Provides-Extra: test
 Requires-Dist: tox; extra == "test"
 Requires-Dist: pytest-cov; extra == "test"

freealg/__version__.py
@@ -0,0 +1 @@
+__version__ = "0.1.1"

freealg/_chebyshev.py
@@ -14,14 +14,15 @@
 import numpy
 from scipy.special import eval_chebyu

-__all__ = ['
+__all__ = ['chebyshev_sample_proj', 'chebyshev_kernel_proj',
+           'chebyshev_approx', 'chebyshev_stieltjes']


-#
-# chebyshev proj
-#
+# =====================
+# chebyshev sample proj
+# =====================

-def chebyshev_proj(eig, support, K=10, reg=0.0):
+def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
     """
     Estimate the coefficients \\psi_k in

@@ -81,6 +82,39 @@ def chebyshev_proj(eig, support, K=10, reg=0.0):
     return psi


+# =====================
+# chebyshev kernel proj
+# =====================
+
+def chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0):
+    """
+    Projection of a *continuous* density given on a grid (xs, pdf)
+    onto the Chebyshev-II basis.
+
+    xs  : 1-D numpy array (original x-axis, not the t-variable)
+    pdf : same shape as xs, integrates to 1 on xs
+    """
+
+    lam_m, lam_p = support
+    t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m)    # map to [-1, 1]
+
+    norm = numpy.pi / 2.0
+    psi = numpy.empty(K + 1)
+
+    for k in range(K + 1):
+        Pk = eval_chebyu(k, t)                  # U_k(t) on the grid
+        moment = numpy.trapz(Pk * pdf, xs)      # \int U_k(t) rho(x) dx
+
+        if k == 0:
+            penalty = 0
+        else:
+            penalty = reg * (k / (K + 1))**2
+
+        psi[k] = moment / (norm + penalty)
+
+    return psi
+
+
 # ================
 # chebyshev approx
 # ================
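
For orientation, a minimal sketch of calling the new chebyshev_kernel_proj on a discretized density; the semicircle test density and the private-module import path are illustrative assumptions, not part of this diff:

    import numpy
    from freealg._chebyshev import chebyshev_kernel_proj   # private module; illustrative import

    # Wigner semicircle density on [-2, 2], discretized on a grid.
    support = (-2.0, 2.0)
    xs = numpy.linspace(support[0], support[1], 2001)
    pdf = numpy.sqrt(numpy.maximum(4.0 - xs**2, 0.0)) / (2.0 * numpy.pi)

    # Project the continuous density onto the Chebyshev-II basis. For the exact
    # semicircle only psi[0] should be (numerically) non-zero.
    psi = chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0)
    print(psi)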

freealg/_jacobi.py
@@ -15,7 +15,8 @@ import numpy
 from scipy.special import eval_jacobi, roots_jacobi
 from scipy.special import gammaln, beta as Beta

-__all__ = ['
+__all__ = ['jacobi_sample_proj', 'jacobi_kernel_proj', 'jacobi_approx',
+           'jacobi_stieltjes']


 # ==============
@@ -43,11 +44,11 @@ def jacobi_sq_norm(k, alpha, beta):
     return numpy.exp(lg_num - lg_den)


-#
-# jacobi
-#
+# ==================
+# jacobi sample proj
+# ==================

-def jacobi_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
+def jacobi_sample_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
     """
     """

@@ -76,6 +77,37 @@ def jacobi_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
     return psi


+# ==================
+# jacobi kernel proj
+# ==================
+
+def jacobi_kernel_proj(xs, pdf, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
+    """
+    Same moments as `jacobi_proj`, but the target is a *continuous* density
+    given on a grid (xs, pdf).
+    """
+
+    lam_m, lam_p = support
+    t = (2.0 * xs - (lam_p + lam_m)) / (lam_p - lam_m)    # map to [-1, 1]
+    psi = numpy.empty(K + 1)
+
+    for k in range(K + 1):
+        Pk = eval_jacobi(k, alpha, beta, t)
+        N_k = jacobi_sq_norm(k, alpha, beta)
+
+        # \int P_k(t) w(t) \rho(t) dt. w(t) cancels with pdf already being rho
+        moment = numpy.trapz(Pk * pdf, xs)
+
+        if k == 0:
+            penalty = 0
+        else:
+            penalty = reg * (k / (K + 1))**2
+
+        psi[k] = moment / (N_k + penalty)
+
+    return psi
+
+
 # =============
 # jacobi approx
 # =============

freealg/_pade.py
@@ -0,0 +1,487 @@
+# SPDX-FileCopyrightText: Copyright 2025, Siavash Ameli <sameli@berkeley.edu>
+# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-FileType: SOURCE
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the license found in the LICENSE.txt file in the root directory
+# of this source tree.
+
+
+# =======
+# Imports
+# =======
+
+import numpy
+from numpy.linalg import lstsq
+from itertools import product
+from scipy.optimize import least_squares, differential_evolution
+
+__all__ = ['fit_pade', 'eval_pade']
+
+
+# =============
+# default poles
+# =============
+
+def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
+    """
+    Generate q real poles outside [lam_m, lam_p].
+
+    • even q : q/2 on each side (Chebyshev-like layout)
+    • odd q  : (q+1)/2 on the *left*, (q-1)/2 on the right
+      so q=1 => single pole on whichever side `odd_side` says.
+
+    safety >= 1: at 1, poles start half an interval away; > 1 pushes them
+    farther.
+    """
+
+    if q == 0:
+        return numpy.empty(0)
+
+    Delta = 0.5 * (lam_p - lam_m)
+
+    # Decide how many poles on each side. m_L and m_R determine how many poles
+    # to be on the left and right of the support interval.
+    if q % 2 == 0:
+        m_L = m_R = q // 2
+    else:
+        if odd_side == 'left':
+            m_L = (q + 1) // 2
+            m_R = q // 2
+        else:
+            m_L = q // 2
+            m_R = (q + 1) // 2
+
+    # Chebyshev-extrema offsets (all positive)
+    kL = numpy.arange(m_L)
+    tL = (2 * kL + 1) * numpy.pi / (2 * m_L)
+    offsL = safety * Delta * (1 + numpy.cos(tL))
+
+    kR = numpy.arange(m_R)
+    tR = (2 * kR + 1) * numpy.pi / (2 * m_R + (m_R == 0))
+    offsR = safety * Delta * (1 + numpy.cos(tR))
+
+    left = lam_m - offsL
+    right = lam_p + offsR
+
+    return numpy.sort(numpy.concatenate([left, right]))
+
+
+# ============
+# encode poles
+# ============
+
+def _encode_poles(a, lam_m, lam_p):
+    """
+    Map real pole a_j → unconstrained s_j,
+    so that the default left-of-interval pole stays left.
+    """
+
+    # half-width of the interval
+    d = 0.5 * (lam_p - lam_m)
+    # if a < lam_m, we want s >= 0; if a > lam_p, s < 0
+    return numpy.where(
+        a < lam_m,
+        numpy.log((lam_m - a) / d),     # zero at a = lam_m - d
+        -numpy.log((a - lam_p) / d)     # zero at a = lam_p + d
+    )
+
+
+# ============
+# decode poles
+# ============
+
+def _decode_poles(s, lam_m, lam_p):
+    """
+    Inverse map s_j → real pole a_j outside the interval.
+    """
+
+    d = 0.5 * (lam_p - lam_m)
+    return numpy.where(
+        s >= 0,
+        lam_m - d * numpy.exp(s),       # maps s=0 to a=lam_m-d (left)
+        lam_p + d * numpy.exp(-s)       # maps s=0 to a=lam_p+d (right)
+    )
+
+
+# ========
+# inner ls
+# ========
+
+def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
+    """
+    This is the inner least square (blazing fast).
+    """
+
+    if poles.size == 0 and p == -1:
+        return 0.0, 0.0, numpy.empty(0)
+
+    if poles.size == 0:   # q = 0
+        # A = numpy.column_stack((numpy.ones_like(x), x))
+        cols = [numpy.ones_like(x)] if p >= 0 else []
+        if p == 1:
+            cols.append(x)
+        A = numpy.column_stack(cols)
+        # ---
+        theta, *_ = lstsq(A, f, rcond=None)
+        # c, D = theta  # TEST
+        if p == -1:
+            c = 0.0
+            D = 0.0
+            resid = numpy.empty(0)
+        elif p == 0:
+            c = theta[0]
+            D = 0.0
+            resid = numpy.empty(0)
+        else:  # p == 1
+            c, D = theta
+            resid = numpy.empty(0)
+    else:
+        # phi = 1.0 / (x[:, None] - poles[None, :])
+        # # A = numpy.column_stack((numpy.ones_like(x), x, phi))  # TEST
+        # # theta, *_ = lstsq(A, f, rcond=None)
+        # # c, D, resid = theta[0], theta[1], theta[2:]
+        # phi = 1.0 / (x[:, None] - poles[None, :])
+        # cols = [numpy.ones_like(x)] if p >= 0 else []
+        # if p == 1:
+        #     cols.append(x)
+        # cols.append(phi)
+        # A = numpy.column_stack(cols)
+        # theta, *_ = lstsq(A, f, rcond=None)
+        # if p == -1:
+        #     c = 0.0
+        #     D = 0.0
+        #     resid = theta
+        # elif p == 0:
+        #     c = theta[0]
+        #     D = 0.0
+        #     resid = theta[1:]
+        # else:  # p == 1
+        #     c = theta[0]
+        #     D = theta[1]
+        #     resid = theta[2:]
+
+        phi = 1.0 / (x[:, None] - poles[None, :])
+        cols = [numpy.ones_like(x)] if p >= 0 else []
+        if p == 1:
+            cols.append(x)
+        cols.append(phi)
+
+        A = numpy.column_stack(cols)
+
+        # theta, *_ = lstsq(A, f, rcond=None)  # TEST
+        if pade_reg > 0:
+            ATA = A.T.dot(A)
+
+            # # add pade_reg * I
+            # ATA.flat[:: ATA.shape[1]+1] += pade_reg
+            # ATf = A.T.dot(f)
+            # theta = numpy.linalg.solve(ATA, ATf)
+
+            # figure out how many elements to skip
+            if p == 1:
+                skip = 2      # skip c and D
+            elif p == 0:
+                skip = 1      # skip c only
+            else:
+                skip = 0      # all entries are residues
+
+            # add λ only for the residue positions
+            n = ATA.shape[0]
+            for i in range(skip, n):
+                ATA[i, i] += pade_reg
+
+            # then solve
+            ATf = A.T.dot(f)
+            theta = numpy.linalg.solve(ATA, ATf)
+
+        else:
+            theta, *_ = lstsq(A, f, rcond=None)
+
+        if p == -1:
+            c, D, resid = 0.0, 0.0, theta
+        elif p == 0:
+            c, D, resid = theta[0], 0.0, theta[1:]
+        else:  # p == 1
+            c, D, resid = theta[0], theta[1], theta[2:]
+
+    return c, D, resid
+
+
+# =============
+# eval rational
+# =============
+
+def _eval_rational(z, c, D, poles, resid):
+    """
+    """
+
+    # z = z[:, None]
+    # if poles.size == 0:
+    #     term = 0.0
+    # else:
+    #     term = numpy.sum(resid / (z - poles), axis=1)
+    #
+    # return c + D * z.ravel() + term
+
+    # ensure z is a 1-D array
+    z = numpy.asarray(z)
+    z_col = z[:, None]
+
+    if poles.size == 0:
+        term = 0.0
+    else:
+        term = numpy.sum(resid / (z_col - poles[None, :]), axis=1)
+
+    return c + D * z + term
+
+
+# ========
+# fit pade
+# ========
+
+def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
+             safety=1.0, max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls',
+             verbose=0):
+    """
+    This is the outer optimiser.
+    """
+
+    # Checks
+    if not (odd_side in ['left', 'right']):
+        raise ValueError('"odd_side" can only be "left" or "right".')
+
+    if not (p in [-1, 0, 1]):
+        raise ValueError('"pade_p" can only be -1, 0, or 1.')
+
+    x = numpy.asarray(x, float)
+    f = numpy.asarray(f, float)
+
+    poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)
+    if q == 0 and p <= 0:
+        # c, D, resid = _inner_ls(x, f, poles0, pade_reg=pade_reg)  # TEST
+        c, D, resid = _inner_ls(x, f, poles0, p, pade_reg=pade_reg)
+        pade_sol = {
+            'c': c, 'D': D, 'poles': poles0, 'resid': resid,
+            'outer_iters': 0
+        }
+
+        return pade_sol
+
+    s0 = _encode_poles(poles0, lam_m, lam_p)
+
+    # --------
+    # residual
+    # --------
+
+    def residual(s, p=p):
+        poles = _decode_poles(s, lam_m, lam_p)
+        # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg)  # TEST
+        c, D, resid = _inner_ls(x, f, poles, p, pade_reg=pade_reg)
+        return _eval_rational(x, c, D, poles, resid) - f
+
+    # ----------------
+
+    # Optimizer
+    if optimizer == 'ls':
+        # scale = numpy.maximum(1.0, numpy.abs(s0))
+        res = least_squares(residual, s0,
+                            method='trf',
+                            # method='lm',
+                            # x_scale=scale,
+                            max_nfev=max_outer, xtol=xtol, ftol=ftol,
+                            verbose=verbose)
+
+    elif optimizer == 'de':
+
+        # Bounds
+        # span = lam_p - lam_m
+        # B = 3.0  # multiples of span
+        # L = numpy.log(B * span)
+        # bounds = [(-L, L)] * len(s0)
+
+        d = 0.5*(lam_p - lam_m)
+        # the minimum factor so that lam_m - d*exp(s)=0 is exp(s)=lam_m/d
+        min_factor = lam_m/d
+        B = max(10.0, min_factor*10.0)
+        L = numpy.log(B)
+        bounds = [(-L, L)] * len(s0)
+
+        # Global stage
+        glob = differential_evolution(lambda s: numpy.sum(residual(s)**2),
+                                      bounds, maxiter=50, popsize=10,
+                                      polish=False)
+
+        # local polish
+        res = least_squares(
+            residual, glob.x,
+            method='lm',
+            max_nfev=max_outer, xtol=xtol, ftol=ftol,
+            verbose=verbose)
+
+    else:
+        raise RuntimeError('"optimizer" is invalid.')
+
+    poles = _decode_poles(res.x, lam_m, lam_p)
+    # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg)  # TEST
+    c, D, resid = _inner_ls(x, f, poles, p, pade_reg=pade_reg)
+
+    pade_sol = {
+        'c': c, 'D': D, 'poles': poles, 'resid': resid,
+        'outer_iters': res.nfev
+    }
+
+    return pade_sol
+
+
+# =========
+# eval pade
+# =========
+
+def eval_pade(z, pade_sol):
+    """
+    """
+
+    # z_arr = numpy.asanyarray(z)              # shape=(M,N)
+    # flat = z_arr.ravel()                     # shape=(M·N,)
+    # c, D = pade_sol['c'], pade_sol['D']
+    # poles = pade_sol['poles']
+    # resid = pade_sol['resid']
+    #
+    # # _eval_rational takes a 1-D array of z's and returns 1-D outputs
+    # flat_out = _eval_rational(flat, c, D, poles, resid)
+    #
+    # # restore the original shape
+    # out = flat_out.reshape(z_arr.shape)      # shape=(M,N)
+    #
+    # return out
+
+    z = numpy.asanyarray(z)   # complex or real, any shape
+    c, D = pade_sol['c'], pade_sol['D']
+    poles, resid = pade_sol['poles'], pade_sol['resid']
+
+    out = c + D*z
+    for bj, rj in zip(poles, resid):
+        out += rj/(z - bj)    # each is an (N,) op, no N×q temp
+    return out
+
+
+# ============
+# fit pade old
+# ============
+
+def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
+                 S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
+    """
+    Fit a [p/q] rational P/Q of the form:
+        P(x) = s * prod_{i=0..p-1}(x - a_i)
+        Q(x) = prod_{j=0..q-1}(x - b_j)
+
+    Constraints:
+        a_i in [lam_m, lam_p]
+        b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
+
+    Approach:
+        - Brute-force all 2^q left/right assignments for denominator roots
+        - Global search with differential_evolution, fallback to zeros if needed
+        - Local refinement with least_squares
+
+    Returns a dict with keys:
+        's'     : optimal scale factor
+        'a'     : array of p numerator roots (in [lam_m, lam_p])
+        'b'     : array of q denominator roots (outside the interval)
+        'resid' : final residual norm
+        'signs' : tuple indicating left/right pattern for each b_j
+    """
+
+    # Determine finite bounds for DE
+    if not numpy.isfinite(B):
+        B_eff = B_default
+    else:
+        B_eff = B
+    if not numpy.isfinite(S):
+        # scale bound: S_factor * max|f| * interval width + safety
+        S_eff = S_factor * numpy.max(numpy.abs(f)) * (lam_p - lam_m) + 1.0
+        if S_eff <= 0:
+            S_eff = 1.0
+    else:
+        S_eff = S
+
+    def map_roots(signs, b):
+        """Map unconstrained b_j -> real root outside the interval."""
+        out = numpy.empty_like(b)
+        for j, (s_val, bj) in enumerate(zip(signs, b)):
+            if s_val > 0:
+                out[j] = lam_p + delta + numpy.exp(bj)
+            else:
+                out[j] = lam_m - delta - numpy.exp(bj)
+        return out
+
+    best = {'resid': numpy.inf}
+
+    # Enumerate all left/right sign patterns
+    for signs in product([-1, 1], repeat=q):
+        # Residual vector for current pattern
+        def resid_vec(z):
+            s_val = z[0]
+            a = z[1:1+p]
+            b = z[1+p:]
+            P = s_val * numpy.prod(x[:, None] - a[None, :], axis=1)
+            roots_Q = map_roots(signs, b)
+            Q = numpy.prod(x[:, None] - roots_Q[None, :], axis=1)
+            return P - f * Q
+
+        def obj(z):
+            r = resid_vec(z)
+            return r.dot(r)
+
+        # Build bounds for DE
+        bounds = []
+        bounds.append((-S_eff, S_eff))      # s
+        bounds += [(lam_m, lam_p)] * p      # a_i
+        bounds += [(-B_eff, B_eff)] * q     # b_j
+
+        # 1) Global search
+        try:
+            de = differential_evolution(obj, bounds,
+                                        maxiter=maxiter_de,
+                                        polish=False)
+            z0 = de.x
+        except ValueError:
+            # fallback: start at zeros
+            z0 = numpy.zeros(1 + p + q)
+
+        # 2) Local refinement
+        ls = least_squares(resid_vec, z0, xtol=1e-12, ftol=1e-12)
+
+        rnorm = numpy.linalg.norm(resid_vec(ls.x))
+        if rnorm < best['resid']:
+            best.update(resid=rnorm, signs=signs, x=ls.x.copy())
+
+    # Unpack best solution
+    z_best = best['x']
+    s_opt = z_best[0]
+    a_opt = z_best[1:1+p]
+    b_opt = map_roots(best['signs'], z_best[1+p:])
+
+    return {
+        's': s_opt,
+        'a': a_opt,
+        'b': b_opt,
+        'resid': best['resid'],
+        'signs': best['signs'],
+    }
+
+
+# =============
+# eval pade old
+# =============
+
+def eval_pade_old(z, s, a, b):
+    """
+    """
+
+    Pz = s * numpy.prod([z - aj for aj in a], axis=0)
+    Qz = numpy.prod([z - bj for bj in b], axis=0)
+
+    return Pz / Qz
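
Since _pade.py is an entirely new module, here is a short usage sketch of the two functions it exports in __all__, fit_pade and eval_pade; the target function and parameter values below are made up for illustration and are not taken from the package:

    import numpy
    from freealg._pade import fit_pade, eval_pade   # illustrative import of the new module

    # A target with two real poles outside the fitting interval [-1, 1].
    def target(x):
        return 0.3 + 0.1 * x + 1.0 / (x - 2.5) - 0.7 / (x + 3.0)

    lam_m, lam_p = -1.0, 1.0
    x = numpy.linspace(lam_m, lam_p, 400)
    f = target(x)

    # Fit c + D*x + sum_j r_j / (x - a_j) with q=2 poles, which the encode/decode
    # parametrization keeps outside [lam_m, lam_p].
    sol = fit_pade(x, f, lam_m, lam_p, p=1, q=2, optimizer='ls')
    print(sol['poles'], sol['resid'])   # poles should land near -3.0 and 2.5

    # Evaluate the fitted rational model, e.g. just off the real axis.
    approx = eval_pade(x + 1e-3j, sol)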

freealg/_plot_util.py
@@ -23,34 +23,6 @@ __all__ = ['plot_fit', 'plot_density', 'plot_hilbert', 'plot_stieltjes',
            'plot_stieltjes_on_disk']


-# ==============
-# plot coeff fit
-# ==============
-
-def plot_coeff_fit(psi, latex=False, save=False):
-    """
-    """
-
-    with texplot.theme(use_latex=latex):
-
-        fig, ax = plt.subplots(figsize=(6, 2.7))
-
-        # Save
-        if save is False:
-            save_status = False
-            save_filename = ''
-        else:
-            save_status = True
-            if isinstance(save, str):
-                save_filename = save
-            else:
-                save_filename = 'energy.pdf'
-
-        texplot.show_or_save_plot(plt, default_filename=save_filename,
-                                  transparent_background=True, dpi=400,
-                                  show_and_save=save_status, verbose=True)
-
-
 # ========
 # plot fit
 # ========
@@ -400,6 +372,12 @@ def plot_stieltjes_on_disk(r, t, m1_D, m2_D, support, latex=False, save=False):
     lam_m_z = (lam_m - 1j) / (lam_m + 1j)
     theta_p = numpy.angle(lam_p_z)
     theta_n = numpy.angle(lam_m_z)
+
+    if theta_n < 0:
+        theta_n += 2.0 * numpy.pi
+    if theta_p < 0:
+        theta_p += 2.0 * numpy.pi
+
     theta_branch = numpy.linspace(theta_n, theta_p, 100)
     theta_alt_branch = numpy.linspace(theta_p, theta_n + 2*numpy.pi, 100)
     r_branch = numpy.ones_like(theta_branch)