freealg 0.0.3__tar.gz → 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {freealg-0.0.3/freealg.egg-info → freealg-0.1.0}/PKG-INFO +2 -1
- freealg-0.1.0/freealg/__version__.py +1 -0
- freealg-0.1.0/freealg/_pade.py +463 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/_plot_util.py +6 -28
- freealg-0.1.0/freealg/_sample.py +85 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/distributions/__init__.py +4 -4
- freealg-0.1.0/freealg/distributions/kesten_mckay.py +559 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/distributions/marchenko_pastur.py +4 -3
- freealg-0.1.0/freealg/distributions/wachter.py +568 -0
- freealg-0.1.0/freealg/distributions/wigner.py +552 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/freeform.py +58 -27
- {freealg-0.0.3 → freealg-0.1.0/freealg.egg-info}/PKG-INFO +2 -1
- {freealg-0.0.3 → freealg-0.1.0}/freealg.egg-info/SOURCES.txt +5 -1
- {freealg-0.0.3 → freealg-0.1.0}/freealg.egg-info/requires.txt +1 -0
- {freealg-0.0.3 → freealg-0.1.0}/requirements.txt +2 -1
- freealg-0.0.3/freealg/__version__.py +0 -1
- freealg-0.0.3/freealg/_pade.py +0 -139
- {freealg-0.0.3 → freealg-0.1.0}/CHANGELOG.rst +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/LICENSE.txt +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/MANIFEST.in +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/README.rst +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/__init__.py +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/_chebyshev.py +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/_damp.py +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/_decompress.py +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/_jacobi.py +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg/_util.py +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg.egg-info/dependency_links.txt +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg.egg-info/not-zip-safe +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/freealg.egg-info/top_level.txt +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/pyproject.toml +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/setup.cfg +0 -0
- {freealg-0.0.3 → freealg-0.1.0}/setup.py +0 -0
{freealg-0.0.3/freealg.egg-info → freealg-0.1.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: freealg
-Version: 0.0.3
+Version: 0.1.0
 Summary: Free probability for large matrices
 Keywords: leaderboard bot chat
 Platform: Linux
@@ -29,6 +29,7 @@ Requires-Dist: scipy
 Requires-Dist: texplot
 Requires-Dist: matplotlib
 Requires-Dist: colorcet
+Requires-Dist: networkx
 Provides-Extra: test
 Requires-Dist: tox; extra == "test"
 Requires-Dist: pytest-cov; extra == "test"
freealg-0.1.0/freealg/__version__.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
freealg-0.1.0/freealg/_pade.py
@@ -0,0 +1,463 @@
+# SPDX-FileCopyrightText: Copyright 2025, Siavash Ameli <sameli@berkeley.edu>
+# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-FileType: SOURCE
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the license found in the LICENSE.txt file in the root directory
+# of this source tree.
+
+
+# =======
+# Imports
+# =======
+
+import numpy
+from numpy.linalg import lstsq
+from itertools import product
+from scipy.optimize import least_squares, differential_evolution
+
+__all__ = ['fit_pade', 'eval_pade']
+
+
+# =============
+# default poles
+# =============
+
+def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
+    """
+    Generate q real poles outside [lam_m, lam_p].
+
+    • even q : q/2 on each side (Chebyshev-like layout)
+    • odd q : (q+1)/2 on the *left*, (q–1)/2 on the right
+      so q=1 => single pole on whichever side `odd_side` says.
+
+    safety >= 1: 1, then poles start half an interval away; >1 pushes them
+    farther.
+    """
+
+    if q == 0:
+        return numpy.empty(0)
+
+    Delta = 0.5 * (lam_p - lam_m)
+
+    # Decide how many poles on each side. m_L and m_R determine how many poles
+    # to be on the left and right of the support interval.
+    if q % 2 == 0:
+        m_L = m_R = q // 2
+    else:
+        if odd_side == 'left':
+            m_L = (q + 1) // 2
+            m_R = q // 2
+        else:
+            m_L = q // 2
+            m_R = (q + 1) // 2
+
+    # Chebyshev-extrema offsets (all positive)
+    kL = numpy.arange(m_L)
+    tL = (2 * kL + 1) * numpy.pi / (2 * m_L)
+    offsL = safety * Delta * (1 + numpy.cos(tL))
+
+    kR = numpy.arange(m_R)
+    tR = (2 * kR + 1) * numpy.pi / (2 * m_R + (m_R == 0))
+    offsR = safety * Delta * (1 + numpy.cos(tR))
+
+    left = lam_m - offsL
+    right = lam_p + offsR
+
+    return numpy.sort(numpy.concatenate([left, right]))
+
+
+# ============
+# encode poles
+# ============
+
+def _encode_poles(a, lam_m, lam_p):
+    """
+    Map real pole a_j → unconstrained s_j,
+    so that the default left-of-interval pole stays left.
+    """
+
+    # half-width of the interval
+    d = 0.5 * (lam_p - lam_m)
+    # if a < lam_m, we want s ≥ 0; if a > lam_p, s < 0
+    return numpy.where(
+        a < lam_m,
+        numpy.log((lam_m - a) / d),    # zero at a = lam_m - d
+        -numpy.log((a - lam_p) / d)    # zero at a = lam_p + d
+    )
+
+
+# ============
+# decode poles
+# ============
+
+def _decode_poles(s, lam_m, lam_p):
+    """
+    Inverse map s_j → real pole a_j outside the interval.
+    """
+
+    d = 0.5 * (lam_p - lam_m)
+    return numpy.where(
+        s >= 0,
+        lam_m - d * numpy.exp(s),     # maps s=0 to a=lam_m−d (left)
+        lam_p + d * numpy.exp(-s)     # maps s=0 to a=lam_p+d (right)
+    )
+
+
+# ========
+# inner ls
+# ========
+
+# def _inner_ls(x, f, poles): # TEST
+def _inner_ls(x, f, poles, p=1):
+    """
+    This is the inner least square (blazing fast).
+    """
+
+    if poles.size == 0 and p == -1:
+        return 0.0, 0.0, numpy.empty(0)
+
+    if poles.size == 0:  # q = 0
+        # A = numpy.column_stack((numpy.ones_like(x), x))
+        cols = [numpy.ones_like(x)] if p >= 0 else []
+        if p == 1:
+            cols.append(x)
+        A = numpy.column_stack(cols)
+        # ---
+        theta, *_ = lstsq(A, f, rcond=None)
+        # c, D = theta # TEST
+        if p == -1:
+            c = 0.0
+            D = 0.0
+            resid = numpy.empty(0)
+        elif p == 0:
+            c = theta[0]
+            D = 0.0
+            resid = numpy.empty(0)
+        else:  # p == 1
+            c, D = theta
+            resid = numpy.empty(0)
+    else:
+        # phi = 1.0 / (x[:, None] - poles[None, :])
+        # # A = numpy.column_stack((numpy.ones_like(x), x, phi)) # TEST
+        # # theta, *_ = lstsq(A, f, rcond=None)
+        # # c, D, resid = theta[0], theta[1], theta[2:]
+        # phi = 1.0 / (x[:, None] - poles[None, :])
+        # cols = [numpy.ones_like(x)] if p >= 0 else []
+        # if p == 1:
+        #     cols.append(x)
+        # cols.append(phi)
+        # A = numpy.column_stack(cols)
+        # theta, *_ = lstsq(A, f, rcond=None)
+        # if p == -1:
+        #     c = 0.0
+        #     D = 0.0
+        #     resid = theta
+        # elif p == 0:
+        #     c = theta[0]
+        #     D = 0.0
+        #     resid = theta[1:]
+        # else:  # p == 1
+        #     c = theta[0]
+        #     D = theta[1]
+        #     resid = theta[2:]
+
+        phi = 1.0 / (x[:, None] - poles[None, :])
+        cols = [numpy.ones_like(x)] if p >= 0 else []
+        if p == 1:
+            cols.append(x)
+        cols.append(phi)
+
+        A = numpy.column_stack(cols)
+        theta, *_ = lstsq(A, f, rcond=None)
+
+        if p == -1:
+            c, D, resid = 0.0, 0.0, theta
+        elif p == 0:
+            c, D, resid = theta[0], 0.0, theta[1:]
+        else:  # p == 1
+            c, D, resid = theta[0], theta[1], theta[2:]
+
+    return c, D, resid
+
+
+# =============
+# eval rational
+# =============
+
+def _eval_rational(z, c, D, poles, resid):
+    """
+    """
+
+    # z = z[:, None]
+    # if poles.size == 0:
+    #     term = 0.0
+    # else:
+    #     term = numpy.sum(resid / (z - poles), axis=1)
+    #
+    # return c + D * z.ravel() + term
+
+    # ensure z is a 1-D array
+    z = numpy.asarray(z)
+    z_col = z[:, None]
+
+    if poles.size == 0:
+        term = 0.0
+    else:
+        term = numpy.sum(resid / (z_col - poles[None, :]), axis=1)
+
+    return c + D * z + term
+
+
+# ========
+# fit pade
+# ========
+
+def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', safety=1.0,
+             max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls', verbose=0):
+    """
+    This is the outer optimiser.
+
+    Fits G(x) = (p>=1 ? c : 0) + (p==1 ? D x : 0) + sum r_j/(x - a_j) # TEST
+    """
+
+    # Checks
+    if not (odd_side in ['left', 'right']):
+        raise ValueError('"odd_side" can only be "left" or "right".')
+
+    if not (p in [-1, 0, 1]):
+        raise ValueError('"pade_p" can only be -1, 0, or 1.')
+
+    x = numpy.asarray(x, float)
+    f = numpy.asarray(f, float)
+
+    poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)
+    # if q == 0:  # nothing to optimise
+    if q == 0 and p <= 0:
+        # c, D, resid = _inner_ls(x, f, poles0) # TEST
+        c, D, resid = _inner_ls(x, f, poles0, p)
+        pade_sol = {
+            'c': c, 'D': D, 'poles': poles0, 'resid': resid,
+            'outer_iters': 0
+        }
+
+        return pade_sol
+
+    s0 = _encode_poles(poles0, lam_m, lam_p)
+
+    # --------
+    # residual
+    # --------
+
+    # def residual(s): # TEST
+    def residual(s, p=p):
+        poles = _decode_poles(s, lam_m, lam_p)
+        # c, D, resid = _inner_ls(x, f, poles) # TEST
+        c, D, resid = _inner_ls(x, f, poles, p)
+        return _eval_rational(x, c, D, poles, resid) - f
+
+    # ----------------
+
+    # Optimizer
+    if optimizer == 'ls':
+        # scale = numpy.maximum(1.0, numpy.abs(s0))
+        res = least_squares(residual, s0,
+                            method='trf',
+                            # method='lm',
+                            # x_scale=scale,
+                            max_nfev=max_outer, xtol=xtol, ftol=ftol,
+                            verbose=verbose)
+
+    elif optimizer == 'de':
+
+        # Bounds
+        # span = lam_p - lam_m
+        # B = 3.0  # multiples of span
+        # L = numpy.log(B * span)
+        # bounds = [(-L, L)] * len(s0)
+
+        d = 0.5*(lam_p - lam_m)
+        # the minimum factor so that lam_m - d*exp(s)=0 is exp(s)=lam_m/d
+        min_factor = lam_m/d
+        B = max(10.0, min_factor*10.0)
+        L = numpy.log(B)
+        bounds = [(-L, L)] * len(s0)
+
+        # Global stage
+        glob = differential_evolution(lambda s: numpy.sum(residual(s)**2),
+                                      bounds, maxiter=50, popsize=10,
+                                      polish=False)
+
+        # local polish
+        res = least_squares(
+            residual, glob.x,
+            method='lm',
+            max_nfev=max_outer, xtol=xtol, ftol=ftol,
+            verbose=verbose)
+
+    else:
+        raise RuntimeError('"optimizer" is invalid.')
+
+    poles = _decode_poles(res.x, lam_m, lam_p)
+    # c, D, resid = _inner_ls(x, f, poles) # TEST
+    c, D, resid = _inner_ls(x, f, poles, p)
+
+    pade_sol = {
+        'c': c, 'D': D, 'poles': poles, 'resid': resid,
+        'outer_iters': res.nfev
+    }
+
+    return pade_sol
+
+
+# =========
+# eval pade
+# =========
+
+def eval_pade(z, pade_sol):
+    """
+    """
+
+    # z_arr = numpy.asanyarray(z)  # shape=(M,N)
+    # flat = z_arr.ravel()  # shape=(M·N,)
+    # c, D = pade_sol['c'], pade_sol['D']
+    # poles = pade_sol['poles']
+    # resid = pade_sol['resid']
+    #
+    # # _eval_rational takes a 1-D array of z's and returns 1-D outputs
+    # flat_out = _eval_rational(flat, c, D, poles, resid)
+    #
+    # # restore the original shape
+    # out = flat_out.reshape(z_arr.shape)  # shape=(M,N)
+    #
+    # return out
+
+    z = numpy.asanyarray(z)  # complex or real, any shape
+    c, D = pade_sol['c'], pade_sol['D']
+    poles, resid = pade_sol['poles'], pade_sol['resid']
+
+    out = c + D*z
+    for bj, rj in zip(poles, resid):
+        out += rj/(z - bj)  # each is an (N,) op, no N×q temp
+    return out
+
+
+# ============
+# fit pade old
+# ============
+
+def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
+                 S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
+    """
+    Fit a [p/q] rational P/Q of the form:
+        P(x) = s * prod_{i=0..p-1}(x - a_i)
+        Q(x) = prod_{j=0..q-1}(x - b_j)
+
+    Constraints:
+        a_i ∈ [lam_m, lam_p]
+        b_j ∈ (-infty, lam_m - delta] cup [lam_p + delta, infty)
+
+    Approach:
+      - Brute‐force all 2^q left/right assignments for denominator roots
+      - Global search with differential_evolution, fallback to zeros if needed
+      - Local refinement with least_squares
+
+    Returns a dict with keys:
+      's' : optimal scale factor
+      'a' : array of p numerator roots (in [lam_m, lam_p])
+      'b' : array of q denominator roots (outside the interval)
+      'resid' : final residual norm
+      'signs' : tuple indicating left/right pattern for each b_j
+    """
+
+    # Determine finite bounds for DE
+    if not numpy.isfinite(B):
+        B_eff = B_default
+    else:
+        B_eff = B
+    if not numpy.isfinite(S):
+        # scale bound: S_factor * max|f| * interval width + safety
+        S_eff = S_factor * numpy.max(numpy.abs(f)) * (lam_p - lam_m) + 1.0
+        if S_eff <= 0:
+            S_eff = 1.0
+    else:
+        S_eff = S
+
+    def map_roots(signs, b):
+        """Map unconstrained b_j -> real root outside the interval."""
+        out = numpy.empty_like(b)
+        for j, (s_val, bj) in enumerate(zip(signs, b)):
+            if s_val > 0:
+                out[j] = lam_p + delta + numpy.exp(bj)
+            else:
+                out[j] = lam_m - delta - numpy.exp(bj)
+        return out
+
+    best = {'resid': numpy.inf}
+
+    # Enumerate all left/right sign patterns
+    for signs in product([-1, 1], repeat=q):
+        # Residual vector for current pattern
+        def resid_vec(z):
+            s_val = z[0]
+            a = z[1:1+p]
+            b = z[1+p:]
+            P = s_val * numpy.prod(x[:, None] - a[None, :], axis=1)
+            roots_Q = map_roots(signs, b)
+            Q = numpy.prod(x[:, None] - roots_Q[None, :], axis=1)
+            return P - f * Q
+
+        def obj(z):
+            r = resid_vec(z)
+            return r.dot(r)
+
+        # Build bounds for DE
+        bounds = []
+        bounds.append((-S_eff, S_eff))  # s
+        bounds += [(lam_m, lam_p)] * p  # a_i
+        bounds += [(-B_eff, B_eff)] * q  # b_j
+
+        # 1) Global search
+        try:
+            de = differential_evolution(obj, bounds,
+                                        maxiter=maxiter_de,
+                                        polish=False)
+            z0 = de.x
+        except ValueError:
+            # fallback: start at zeros
+            z0 = numpy.zeros(1 + p + q)
+
+        # 2) Local refinement
+        ls = least_squares(resid_vec, z0, xtol=1e-12, ftol=1e-12)
+
+        rnorm = numpy.linalg.norm(resid_vec(ls.x))
+        if rnorm < best['resid']:
+            best.update(resid=rnorm, signs=signs, x=ls.x.copy())
+
+    # Unpack best solution
+    z_best = best['x']
+    s_opt = z_best[0]
+    a_opt = z_best[1:1+p]
+    b_opt = map_roots(best['signs'], z_best[1+p:])
+
+    return {
+        's': s_opt,
+        'a': a_opt,
+        'b': b_opt,
+        'resid': best['resid'],
+        'signs': best['signs'],
+    }
+
+
+# =============
+# eval pade old
+# =============
+
+def eval_pade_old(z, s, a, b):
+    """
+    """
+
+    Pz = s * numpy.prod([z - aj for aj in a], axis=0)
+    Qz = numpy.prod([z - bj for bj in b], axis=0)
+
+    return Pz / Qz
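For orientation, the new module above implements a pole-based rational (Padé-like) fit: fit_pade returns a dictionary with keys 'c', 'D', 'poles', 'resid', and 'outer_iters', and eval_pade evaluates c + D*z + sum_j r_j/(z - a_j) from that dictionary. Below is a minimal usage sketch based only on the signatures shown in the hunk; the toy data and the direct import from the private freealg._pade module are illustrative assumptions, not taken from the package documentation.

import numpy
from freealg._pade import fit_pade, eval_pade   # module added in 0.1.0

# Toy target: samples of a smooth function on a known support [lam_m, lam_p].
lam_m, lam_p = -2.0, 2.0
x = numpy.linspace(lam_m, lam_p, 200)
f = numpy.sqrt(numpy.maximum(4.0 - x**2, 0.0)) / (2.0 * numpy.pi)

# Fit with an affine part (p=1) and two real poles outside the support (q=2).
sol = fit_pade(x, f, lam_m, lam_p, p=1, q=2)
print(sol['poles'], sol['outer_iters'])

# Evaluate the fitted rational function slightly above the real axis.
z = x + 1e-3j
Gz = eval_pade(z, sol)
print(Gz.shape)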
{freealg-0.0.3 → freealg-0.1.0}/freealg/_plot_util.py
@@ -23,34 +23,6 @@ __all__ = ['plot_fit', 'plot_density', 'plot_hilbert', 'plot_stieltjes',
            'plot_stieltjes_on_disk']
 
 
-# ==============
-# plot coeff fit
-# ==============
-
-def plot_coeff_fit(psi, latex=False, save=False):
-    """
-    """
-
-    with texplot.theme(use_latex=latex):
-
-        fig, ax = plt.subplots(figsize=(6, 2.7))
-
-        # Save
-        if save is False:
-            save_status = False
-            save_filename = ''
-        else:
-            save_status = True
-            if isinstance(save, str):
-                save_filename = save
-            else:
-                save_filename = 'energy.pdf'
-
-        texplot.show_or_save_plot(plt, default_filename=save_filename,
-                                  transparent_background=True, dpi=400,
-                                  show_and_save=save_status, verbose=True)
-
-
 # ========
 # plot fit
 # ========
@@ -400,6 +372,12 @@ def plot_stieltjes_on_disk(r, t, m1_D, m2_D, support, latex=False, save=False):
     lam_m_z = (lam_m - 1j) / (lam_m + 1j)
     theta_p = numpy.angle(lam_p_z)
     theta_n = numpy.angle(lam_m_z)
+
+    if theta_n < 0:
+        theta_n += 2.0 * numpy.pi
+    if theta_p < 0:
+        theta_p += 2.0 * numpy.pi
+
     theta_branch = numpy.linspace(theta_n, theta_p, 100)
     theta_alt_branch = numpy.linspace(theta_p, theta_n + 2*numpy.pi, 100)
     r_branch = numpy.ones_like(theta_branch)
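For context on the six added lines: numpy.angle returns the principal angle in (-pi, pi], so a support endpoint that maps below the real axis gets a negative angle; shifting such values by 2*pi keeps both endpoints in [0, 2*pi) before theta_branch is built. A small standalone illustration of the wrap (the sample complex number is arbitrary):

import numpy

w = -1.0 - 0.5j              # a point with a negative principal angle
theta = numpy.angle(w)       # about -2.68 rad, in (-pi, pi]
if theta < 0:
    theta += 2.0 * numpy.pi  # wrapped into [0, 2*pi), about 3.61 rad
print(theta)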
freealg-0.1.0/freealg/_sample.py
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-FileType: SOURCE
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the license found in the LICENSE.txt file in the root directory
+# of this source tree.
+
+
+# =======
+# Imports
+# =======
+
+from scipy.integrate import cumulative_trapezoid
+from scipy.interpolate import interp1d
+from scipy.stats import qmc
+
+__all__ = ['qmc_sample']
+
+
+# =============
+# quantile func
+# =============
+
+def _quantile_func(x, rho):
+    """
+    Construct a quantile function from evaluations of an estimated density
+    on a grid (x, rho(x)).
+    """
+    cdf = cumulative_trapezoid(rho, x, initial=0)
+    cdf /= cdf[-1]
+    return interp1d(cdf, x, bounds_error=False, assume_sorted=True)
+
+
+# ==========
+# qmc sample
+# ==========
+
+def qmc_sample(x, rho, num_pts):
+    """
+    Low-discrepancy sampling from a univariate density estimate using
+    Quasi-Monte Carlo.
+
+    Parameters
+    ----------
+    x : numpy.array, shape (n,)
+        Sorted abscissae at which the density has been evaluated.
+
+    rho : numpy.array, shape (n,)
+        Density values corresponding to `x`. Must be non-negative and define
+        a valid probability density (i.e., integrate to 1 over the support).
+
+    num_pts : int
+        Number of sample points to generate from the density estimate.
+
+    Returns
+    -------
+    samples : numpy.array, shape (num_pts,)
+        Samples drawn from the estimated density using a one-dimensional Halton
+        sequence mapped through the estimated quantile function.
+
+    See Also
+    --------
+    scipy.stats.qmc.Halton
+        Underlying Quasi-Monte Carlo engine used for generating low-discrepancy
+        points.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        >>> import numpy
+        >>> from your_module import qmc_sample
+        >>> x = numpy.linspace(0, 1, 200)
+        >>> rho = 3 * x**2  # density of Beta(3,1) on [0,1]
+        >>> samples = qmc_sample(x, rho, num_pts=1000)
+        >>> assert samples.shape == (1000,)
+        >>> # Empirical mean should be close to 3/4
+        >>> numpy.allclose(samples.mean(), 0.75, atol=0.02)
+    """
+
+    quantile = _quantile_func(x, rho)
+    engine = qmc.Halton(d=1)
+    u = engine.random(num_pts)
+    samples = quantile(u)
+    return samples.ravel()
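The docstring example above imports from a your_module placeholder. A minimal sketch of calling the sampler through the module path this release actually adds (whether qmc_sample is also re-exported at the package level is not visible in this part of the diff, so the private path is assumed here):

import numpy
from freealg._sample import qmc_sample   # path of the new module in 0.1.0

x = numpy.linspace(0.0, 1.0, 200)
rho = 3.0 * x**2                          # Beta(3, 1) density on [0, 1]
samples = qmc_sample(x, rho, num_pts=1000)
print(samples.shape, samples.mean())      # mean should land near 0.75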
{freealg-0.0.3 → freealg-0.1.0}/freealg/distributions/__init__.py
@@ -7,10 +7,10 @@
 # directory of this source tree.
 
 from .marchenko_pastur import MarchenkoPastur
-
-
-
+from .wigner import Wigner
+from .kesten_mckay import KestenMcKay
+from .wachter import Wachter
 # from .meixner import meixner
 
 # __all__ = ['MarchenkoPastur', 'Wigner', 'KestenMcKay', 'Wachter', 'Meixner']
-__all__ = ['MarchenkoPastur']
+__all__ = ['MarchenkoPastur', 'Wigner', 'KestenMcKay', 'Wachter']