freealg 0.1.11__py3-none-any.whl → 0.7.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- freealg/__init__.py +8 -2
- freealg/__version__.py +1 -1
- freealg/_algebraic_form/__init__.py +12 -0
- freealg/_algebraic_form/_branch_points.py +288 -0
- freealg/_algebraic_form/_constraints.py +139 -0
- freealg/_algebraic_form/_continuation_algebraic.py +706 -0
- freealg/_algebraic_form/_decompress.py +641 -0
- freealg/_algebraic_form/_decompress2.py +204 -0
- freealg/_algebraic_form/_edge.py +330 -0
- freealg/_algebraic_form/_homotopy.py +323 -0
- freealg/_algebraic_form/_moments.py +448 -0
- freealg/_algebraic_form/_sheets_util.py +145 -0
- freealg/_algebraic_form/_support.py +309 -0
- freealg/_algebraic_form/algebraic_form.py +1232 -0
- freealg/_free_form/__init__.py +16 -0
- freealg/{_chebyshev.py → _free_form/_chebyshev.py} +75 -43
- freealg/_free_form/_decompress.py +993 -0
- freealg/_free_form/_density_util.py +243 -0
- freealg/_free_form/_jacobi.py +359 -0
- freealg/_free_form/_linalg.py +508 -0
- freealg/{_pade.py → _free_form/_pade.py} +42 -208
- freealg/{_plot_util.py → _free_form/_plot_util.py} +37 -22
- freealg/{_sample.py → _free_form/_sample.py} +58 -22
- freealg/_free_form/_series.py +454 -0
- freealg/_free_form/_support.py +214 -0
- freealg/_free_form/free_form.py +1362 -0
- freealg/_geometric_form/__init__.py +13 -0
- freealg/_geometric_form/_continuation_genus0.py +175 -0
- freealg/_geometric_form/_continuation_genus1.py +275 -0
- freealg/_geometric_form/_elliptic_functions.py +174 -0
- freealg/_geometric_form/_sphere_maps.py +63 -0
- freealg/_geometric_form/_torus_maps.py +118 -0
- freealg/_geometric_form/geometric_form.py +1094 -0
- freealg/_util.py +56 -110
- freealg/distributions/__init__.py +7 -1
- freealg/distributions/_chiral_block.py +494 -0
- freealg/distributions/_deformed_marchenko_pastur.py +726 -0
- freealg/distributions/_deformed_wigner.py +386 -0
- freealg/distributions/_kesten_mckay.py +29 -15
- freealg/distributions/_marchenko_pastur.py +224 -95
- freealg/distributions/_meixner.py +47 -37
- freealg/distributions/_wachter.py +29 -17
- freealg/distributions/_wigner.py +27 -14
- freealg/visualization/__init__.py +12 -0
- freealg/visualization/_glue_util.py +32 -0
- freealg/visualization/_rgb_hsv.py +125 -0
- freealg-0.7.12.dist-info/METADATA +172 -0
- freealg-0.7.12.dist-info/RECORD +53 -0
- {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/WHEEL +1 -1
- freealg/_decompress.py +0 -180
- freealg/_jacobi.py +0 -218
- freealg/_support.py +0 -85
- freealg/freeform.py +0 -967
- freealg-0.1.11.dist-info/METADATA +0 -140
- freealg-0.1.11.dist-info/RECORD +0 -24
- /freealg/{_damp.py → _free_form/_damp.py} +0 -0
- {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/licenses/AUTHORS.txt +0 -0
- {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/licenses/LICENSE.txt +0 -0
- {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/top_level.txt +0 -0
@@ -12,9 +12,7 @@
 # =======
 
 import numpy
-import numba
 from numpy.linalg import lstsq
-from itertools import product
 from scipy.optimize import least_squares, differential_evolution
 
 __all__ = ['fit_pade', 'eval_pade']
@@ -28,8 +26,8 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
     """
     Generate q real poles outside [lam_m, lam_p].
 
-
-
+    * even q : q/2 on each side (Chebyshev-like layout)
+    * odd q : (q+1)/2 on the *left*, (q-1)/2 on the right
     so q=1 => single pole on whichever side `odd_side` says.
 
     safety >= 1: 1, then poles start half an interval away; >1 pushes them
@@ -74,13 +72,13 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
 
 def _encode_poles(a, lam_m, lam_p):
     """
-    Map real pole a_j
+    Map real pole a_j => unconstrained s_j,
     so that the default left-of-interval pole stays left.
     """
 
     # half-width of the interval
     d = 0.5 * (lam_p - lam_m)
-    # if a < lam_m, we want s
+    # if a < lam_m, we want s >= 0; if a > lam_p, s < 0
    return numpy.where(
        a < lam_m,
        numpy.log((lam_m - a) / d), # zero at a = lam_m - d
@@ -94,13 +92,13 @@ def _encode_poles(a, lam_m, lam_p):
 
 def _decode_poles(s, lam_m, lam_p):
     """
-    Inverse map s_j
+    Inverse map s_j => real pole a_j outside the interval.
     """
 
     d = 0.5 * (lam_p - lam_m)
     return numpy.where(
         s >= 0,
-        lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m
+        lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m-d (left)
         lam_p + d * numpy.exp(-s) # maps s=0 to a=lam_p+d (right)
     )
 
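The two hunks above introduce a log/exp change of variables: `_encode_poles` maps a pole constrained to lie outside `[lam_m, lam_p]` to an unconstrained real parameter, and `_decode_poles` maps it back, so the outer optimizer can move pole positions freely without ever pushing them into the support. A minimal standalone sketch of the same mapping (helper names are mine; the right-hand branch of the encoder is not shown in the hunk and is reconstructed here as the natural inverse of `_decode_poles`):

```python
import numpy

def encode_poles(a, lam_m, lam_p):
    # half-width of the support interval
    d = 0.5 * (lam_p - lam_m)
    # abs() only silences the unused branch of numpy.where
    return numpy.where(a < lam_m,
                       numpy.log(numpy.abs(lam_m - a) / d),
                       -numpy.log(numpy.abs(a - lam_p) / d))

def decode_poles(s, lam_m, lam_p):
    d = 0.5 * (lam_p - lam_m)
    # s = 0 maps to a pole half an interval away on either side
    return numpy.where(s >= 0,
                       lam_m - d * numpy.exp(s),    # left of the support
                       lam_p + d * numpy.exp(-s))   # right of the support

# round trip for poles outside the support [-1, 2]
a = numpy.array([-3.0, 4.5])
s = encode_poles(a, -1.0, 2.0)
assert numpy.allclose(decode_poles(s, -1.0, 2.0), a)
```

With s = 0 corresponding to a pole half an interval away on either side, small steps in s stay safely clear of the support.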
@@ -109,32 +107,35 @@ def _decode_poles(s, lam_m, lam_p):
 # inner ls
 # ========
 
-def _inner_ls(x, f, poles,
+def _inner_ls(x, f, poles, dpq=1, pade_reg=0.0):
     """
     This is the inner least square (blazing fast).
+
+    dqp is the difference between the order of P (numerator) and Q
+    (denominator).
     """
 
-    if poles.size == 0 and
+    if poles.size == 0 and dpq == -1:
         return 0.0, 0.0, numpy.empty(0)
 
     if poles.size == 0: # q = 0
         # A = numpy.column_stack((numpy.ones_like(x), x))
-        cols = [numpy.ones_like(x)] if
-        if
+        cols = [numpy.ones_like(x)] if dpq >= 0 else []
+        if dpq == 1:
             cols.append(x)
         A = numpy.column_stack(cols)
         # ---
         theta, *_ = lstsq(A, f, rcond=None)
         # c, D = theta # TEST
-        if
+        if dpq == -1:
             c = 0.0
             D = 0.0
             resid = numpy.empty(0)
-        elif
+        elif dpq == 0:
             c = theta[0]
             D = 0.0
             resid = numpy.empty(0)
-        else: #
+        else: # dpq == 1
             c, D = theta
             resid = numpy.empty(0)
     else:
@@ -143,28 +144,28 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
         # # theta, *_ = lstsq(A, f, rcond=None)
         # # c, D, resid = theta[0], theta[1], theta[2:]
         # phi = 1.0 / (x[:, None] - poles[None, :])
-        # cols = [numpy.ones_like(x)] if
-        # if
+        # cols = [numpy.ones_like(x)] if dpq >= 0 else []
+        # if dpq == 1:
         # cols.append(x)
         # cols.append(phi)
         # A = numpy.column_stack(cols)
         # theta, *_ = lstsq(A, f, rcond=None)
-        # if
+        # if dpq == -1:
         # c = 0.0
         # D = 0.0
         # resid = theta
-        # elif
+        # elif dpq == 0:
         # c = theta[0]
        # D = 0.0
        # resid = theta[1:]
-        # else: #
+        # else: # dpq == 1
        # c = theta[0]
        # D = theta[1]
        # resid = theta[2:]
 
         phi = 1.0 / (x[:, None] - poles[None, :])
-        cols = [numpy.ones_like(x)] if
-        if
+        cols = [numpy.ones_like(x)] if dpq >= 0 else []
+        if dpq == 1:
             cols.append(x)
         cols.append(phi)
 
@@ -180,14 +181,14 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
             # theta = numpy.linalg.solve(ATA, ATf)
 
             # figure out how many elements to skip
-            if
+            if dpq == 1:
                 skip = 2 # skip c and D
-            elif
+            elif dpq == 0:
                 skip = 1 # skip c only
             else:
                 skip = 0 # all entries are residues
 
-            # add
+            # add lambda only for the residue positions
             n = ATA.shape[0]
             for i in range(skip, n):
                 ATA[i, i] += pade_reg
@@ -199,11 +200,11 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
         else:
             theta, *_ = lstsq(A, f, rcond=None)
 
-        if
+        if dpq == -1:
             c, D, resid = 0.0, 0.0, theta
-        elif
+        elif dpq == 0:
             c, D, resid = theta[0], 0.0, theta[1:]
-        else: #
+        else: # dpq == 1
             c, D, resid = theta[0], theta[1], theta[2:]
 
     return c, D, resid
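For fixed pole locations, `_inner_ls` reduces the Padé fit to a linear least-squares problem in the remaining coefficients of f(x) ≈ c + D·x + Σ_j r_j/(x − a_j); the new `dpq = p − q` argument decides whether the constant term (dpq ≥ 0) and the linear term (dpq = 1) are included. A rough self-contained sketch of that design-matrix construction (function and variable names are illustrative, not the package's):

```python
import numpy
from numpy.linalg import lstsq

def inner_ls_sketch(x, f, poles, dpq=1):
    """Solve for c, D and the residues r_j with the poles held fixed."""
    # Cauchy-like columns 1 / (x - a_j), one per pole
    phi = 1.0 / (x[:, None] - poles[None, :])
    cols = []
    if dpq >= 0:
        cols.append(numpy.ones_like(x))    # constant term c
    if dpq == 1:
        cols.append(x)                     # linear term D * x
    cols.append(phi)
    A = numpy.column_stack(cols)
    theta, *_ = lstsq(A, f, rcond=None)
    if dpq == -1:
        c, D, resid = 0.0, 0.0, theta
    elif dpq == 0:
        c, D, resid = theta[0], 0.0, theta[1:]
    else:
        c, D, resid = theta[0], theta[1], theta[2:]
    return c, D, resid

# Recover a known rational function exactly when the poles are held at the truth
x = numpy.linspace(-1.0, 1.0, 200)
poles = numpy.array([-2.0, 3.0])
f = 0.3 + 0.1 * x + 1.5 / (x - poles[0]) - 0.7 / (x - poles[1])
c, D, resid = inner_ls_sketch(x, f, poles, dpq=1)
assert numpy.allclose([c, D], [0.3, 0.1]) and numpy.allclose(resid, [1.5, -0.7])
```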
@@ -236,61 +237,12 @@ def _eval_rational(z, c, D, poles, resid):
 
     return c + D * z + term
 
-# ========
-# Wynn epsilon algorithm for Pade
-# ========
-
-@numba.jit(nopython=True, parallel=True)
-def wynn_pade(coeffs, x):
-    """
-    Given the coefficients of a power series
-    f(x) = sum_{n=0}^∞ coeffs[n] * x^n,
-    returns a function handle that computes the Pade approximant at any x
-    using Wynn's epsilon algorithm.
-
-    Parameters:
-        coeffs (list or array): Coefficients [a0, a1, a2, ...] of the power series.
-
-    Returns:
-        function: A function approximant(x) that returns the approximated value f(x).
-    """
-    # Number of coefficients
-    xn = x.ravel()
-    d = len(xn)
-    N = len(coeffs)
-
-    # Compute the partial sums s_n = sum_{i=0}^n a_i * x^i for n=0,...,N-1
-    eps = numpy.zeros((N+1, N, d), dtype=numpy.complex128)
-    for i in numba.prange(d):
-        partial_sum = 0.0
-        for n in range(N):
-            partial_sum += coeffs[n] * (xn[i] ** n)
-            eps[0,n,i] = partial_sum
-
-    for i in numba.prange(d):
-        for k in range(1, N+1):
-            for j in range(N - k):
-                delta = eps[k-1, j+1,i] - eps[k-1, j,i]
-                if delta == 0:
-                    rec_delta = numpy.inf
-                elif numpy.isinf(delta) or numpy.isnan(delta):
-                    rec_delta = 0.0
-                else:
-                    rec_delta = 1.0 / delta
-                eps[k,j,i] = rec_delta
-                if k > 1:
-                    eps[k,j,i] += eps[k-2,j+1,i]
-
-    if (N % 2) == 0:
-        N -= 1
-
-    return eps[N-1, 0, :].reshape(x.shape)
 
 # ========
 # fit pade
 # ========
 
-def fit_pade(x, f, lam_m, lam_p, p=
+def fit_pade(x, f, lam_m, lam_p, p=2, q=2, odd_side='left', pade_reg=0.0,
              safety=1.0, max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls',
              verbose=0):
     """
@@ -301,16 +253,19 @@ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
     if not (odd_side in ['left', 'right']):
         raise ValueError('"odd_side" can only be "left" or "right".')
 
-
-
+    # Difference between the degrees of numerator and denominator
+    dpq = p - q
+    if not (dpq in [-1, 0, 1]):
+        raise ValueError('"pade_p" and "pade_q" can only differ by "+1", ' +
+                         '"0", or "-1".')
 
     x = numpy.asarray(x, float)
     f = numpy.asarray(f, float)
 
     poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)
-    if q == 0 and
+    if q == 0 and dpq <= 0:
         # c, D, resid = _inner_ls(x, f, poles0, pade_reg=pade_reg) # TEST
-        c, D, resid = _inner_ls(x, f, poles0,
+        c, D, resid = _inner_ls(x, f, poles0, dpq, pade_reg=pade_reg)
         pade_sol = {
             'c': c, 'D': D, 'poles': poles0, 'resid': resid,
             'outer_iters': 0
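The hunks above wire the degree difference through `fit_pade`: it validates `dpq = p − q ∈ {−1, 0, +1}`, builds default poles, and then alternates an outer optimization over the encoded pole positions with the fast inner least squares. A hedged usage sketch, assuming `fit_pade` and `eval_pade` can be imported from the private module path `freealg/_free_form/_pade.py` listed above (the synthetic target below is my own example, not taken from the package):

```python
import numpy
# private module path as listed in this release; not a documented public API
from freealg._free_form._pade import fit_pade, eval_pade

lam_m, lam_p = -1.0, 1.0
x = numpy.linspace(lam_m, lam_p, 400)

# synthetic data: a rational function whose poles lie outside [lam_m, lam_p]
f = 0.5 + 0.2 * x + 1.0 / (x - 2.0) - 0.8 / (x + 3.0)

# p - q must be -1, 0, or +1; p=3, q=2 gives dpq=1 (constant and linear terms kept)
pade_sol = fit_pade(x, f, lam_m, lam_p, p=3, q=2)

f_hat = eval_pade(x, pade_sol)
print('poles:', pade_sol['poles'])
print('max abs error:', numpy.max(numpy.abs(f_hat - f)))
```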
@@ -324,10 +279,10 @@ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
     # residual
     # --------
 
-    def residual(s,
+    def residual(s, dpq=dpq):
         poles = _decode_poles(s, lam_m, lam_p)
         # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg) # TEST
-        c, D, resid = _inner_ls(x, f, poles,
+        c, D, resid = _inner_ls(x, f, poles, dpq, pade_reg=pade_reg)
         return _eval_rational(x, c, D, poles, resid) - f
 
     # ----------------
@@ -374,7 +329,7 @@ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
 
     poles = _decode_poles(res.x, lam_m, lam_p)
     # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg) # TEST
-    c, D, resid = _inner_ls(x, f, poles,
+    c, D, resid = _inner_ls(x, f, poles, dpq, pade_reg=pade_reg)
 
     pade_sol = {
         'c': c, 'D': D, 'poles': poles, 'resid': resid,
@@ -393,7 +348,7 @@ def eval_pade(z, pade_sol):
     """
 
     # z_arr = numpy.asanyarray(z) # shape=(M,N)
-    # flat = z_arr.ravel() # shape=(M
+    # flat = z_arr.ravel() # shape=(M*N,)
     # c, D = pade_sol['c'], pade_sol['D']
     # poles = pade_sol['poles']
     # resid = pade_sol['resid']
@@ -412,126 +367,5 @@ def eval_pade(z, pade_sol):
 
     out = c + D*z
     for bj, rj in zip(poles, resid):
-        out += rj/(z - bj) # each is an (N,) op, no
+        out += rj/(z - bj) # each is an (N,) op, no N*q temp
     return out
-
-
-# ============
-# fit pade old
-# ============
-
-def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
-                 S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
-    """
-    Fit a [p/q] rational P/Q of the form:
-        P(x) = s * prod_{i=0..p-1}(x - a_i)
-        Q(x) = prod_{j=0..q-1}(x - b_j)
-
-    Constraints:
-        a_i in [lam_m, lam_p]
-        b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
-
-    Approach:
-        - Brute‐force all 2^q left/right assignments for denominator roots
-        - Global search with differential_evolution, fallback to zeros if needed
-        - Local refinement with least_squares
-
-    Returns a dict with keys:
-        's' : optimal scale factor
-        'a' : array of p numerator roots (in [lam_m, lam_p])
-        'b' : array of q denominator roots (outside the interval)
-        'resid' : final residual norm
-        'signs' : tuple indicating left/right pattern for each b_j
-    """
-
-    # Determine finite bounds for DE
-    if not numpy.isfinite(B):
-        B_eff = B_default
-    else:
-        B_eff = B
-    if not numpy.isfinite(S):
-        # scale bound: S_factor * max|f| * interval width + safety
-        S_eff = S_factor * numpy.max(numpy.abs(f)) * (lam_p - lam_m) + 1.0
-        if S_eff <= 0:
-            S_eff = 1.0
-    else:
-        S_eff = S
-
-    def map_roots(signs, b):
-        """Map unconstrained b_j -> real root outside the interval."""
-        out = numpy.empty_like(b)
-        for j, (s_val, bj) in enumerate(zip(signs, b)):
-            if s_val > 0:
-                out[j] = lam_p + delta + numpy.exp(bj)
-            else:
-                out[j] = lam_m - delta - numpy.exp(bj)
-        return out
-
-    best = {'resid': numpy.inf}
-
-    # Enumerate all left/right sign patterns
-    for signs in product([-1, 1], repeat=q):
-        # Residual vector for current pattern
-        def resid_vec(z):
-            s_val = z[0]
-            a = z[1:1+p]
-            b = z[1+p:]
-            P = s_val * numpy.prod(x[:, None] - a[None, :], axis=1)
-            roots_Q = map_roots(signs, b)
-            Q = numpy.prod(x[:, None] - roots_Q[None, :], axis=1)
-            return P - f * Q
-
-        def obj(z):
-            r = resid_vec(z)
-            return r.dot(r)
-
-        # Build bounds for DE
-        bounds = []
-        bounds.append((-S_eff, S_eff)) # s
-        bounds += [(lam_m, lam_p)] * p # a_i
-        bounds += [(-B_eff, B_eff)] * q # b_j
-
-        # 1) Global search
-        try:
-            de = differential_evolution(obj, bounds,
-                                        maxiter=maxiter_de,
-                                        polish=False)
-            z0 = de.x
-        except ValueError:
-            # fallback: start at zeros
-            z0 = numpy.zeros(1 + p + q)
-
-        # 2) Local refinement
-        ls = least_squares(resid_vec, z0, xtol=1e-12, ftol=1e-12)
-
-        rnorm = numpy.linalg.norm(resid_vec(ls.x))
-        if rnorm < best['resid']:
-            best.update(resid=rnorm, signs=signs, x=ls.x.copy())
-
-    # Unpack best solution
-    z_best = best['x']
-    s_opt = z_best[0]
-    a_opt = z_best[1:1+p]
-    b_opt = map_roots(best['signs'], z_best[1+p:])
-
-    return {
-        's': s_opt,
-        'a': a_opt,
-        'b': b_opt,
-        'resid': best['resid'],
-        'signs': best['signs'],
-    }
-
-
-# =============
-# eval pade old
-# =============
-
-def eval_pade_old(z, s, a, b):
-    """
-    """
-
-    Pz = s * numpy.prod([z - aj for aj in a], axis=0)
-    Qz = numpy.prod([z - bj for bj in b], axis=0)
-
-    return Pz / Qz
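The retained evaluation loop in `eval_pade` adds one residue term at a time, which the inline comment notes avoids allocating an (N, q) temporary; the commented-out flattening code above it hints at the fully vectorized alternative. A small illustration of the two equivalent formulations (standalone sketch, not the package's code):

```python
import numpy

def eval_partial_fraction_loop(z, c, D, poles, resid):
    # one (N,) operation per pole; no (N, q) temporary is materialised
    out = c + D * z
    for bj, rj in zip(poles, resid):
        out = out + rj / (z - bj)
    return out

def eval_partial_fraction_broadcast(z, c, D, poles, resid):
    # same result, but builds an (N, q) array before summing over the poles
    terms = resid[None, :] / (z[:, None] - poles[None, :])
    return c + D * z + numpy.sum(terms, axis=1)

z = numpy.linspace(-0.9, 0.9, 1000)
poles = numpy.array([-2.0, 3.0])
resid = numpy.array([1.5, -0.7])
a = eval_partial_fraction_loop(z, 0.3, 0.1, poles, resid)
b = eval_partial_fraction_broadcast(z, 0.3, 0.1, poles, resid)
assert numpy.allclose(a, b)
```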
@@ -20,7 +20,7 @@ import matplotlib.ticker as ticker
 import matplotlib.gridspec as gridspec
 
 __all__ = ['plot_fit', 'plot_density', 'plot_hilbert', 'plot_stieltjes',
-           'plot_stieltjes_on_disk']
+           'plot_stieltjes_on_disk', 'plot_samples']
 
 
 # ========
@@ -34,7 +34,17 @@ def plot_fit(psi, x_supp, g_supp, g_supp_approx, support, latex=False,
 
     with texplot.theme(use_latex=latex):
 
-
+        if g_supp is None:
+            figsize = (4.5, 3)
+            ncols = 1
+        else:
+            figsize = (9, 3)
+            ncols = 2
+
+        fig, ax = plt.subplots(figsize=figsize, ncols=ncols)
+
+        if g_supp is None:
+            ax = [ax]
 
         # Plot psi
         n = numpy.arange(1, 1+psi.size)
@@ -46,22 +56,24 @@ def plot_fit(psi, x_supp, g_supp, g_supp_approx, support, latex=False,
         ax[0].set_yscale('log')
 
         # Plot pade
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if g_supp is not None:
+            lam_m, lam_p = support
+            g_supp_min = numpy.min(g_supp)
+            g_supp_max = numpy.max(g_supp)
+            g_supp_dif = g_supp_max - g_supp_min
+            g_min = g_supp_min - g_supp_dif * 1.1
+            g_max = g_supp_max + g_supp_dif * 1.1
+
+            ax[1].plot(x_supp, g_supp, color='firebrick',
+                       label=r'$2 \pi \times $ Hilbert Transform')
+            ax[1].plot(x_supp, g_supp_approx, color='black',
+                       label='Pade estimate')
+            ax[1].legend(fontsize='small')
+            ax[1].set_xlim([lam_m, lam_p])
+            ax[1].set_ylim([g_min, g_max])
+            ax[1].set_title('Approximation of Glue Function')
+            ax[1].set_xlabel(r'$x$')
+            ax[1].set_ylabel(r'$G(x)$')
 
         plt.tight_layout()
 
@@ -129,7 +141,7 @@ def _auto_bins(array, method='scott', factor=5):
         num_bins = int(numpy.ceil(numpy.log2(len(array)) + 1))
 
     else:
-        raise
+        raise NotImplementedError('"method" is invalid.')
 
     return num_bins * factor
 
@@ -139,7 +151,7 @@ def _auto_bins(array, method='scott', factor=5):
 # ============
 
 def plot_density(x, rho, eig=None, support=None, label='',
-                 title='Spectral
+                 title='Spectral Density', latex=False, save=False):
     """
     """
 
@@ -147,8 +159,11 @@ def plot_density(x, rho, eig=None, support=None, label='',
 
         fig, ax = plt.subplots(figsize=(6, 2.7))
 
-        if
-
+        if eig is not None:
+            if support is not None:
+                lam_m, lam_p = support
+            else:
+                lam_m, lam_p = min(eig), max(eig)
             bins = numpy.linspace(lam_m, lam_p, _auto_bins(eig))
             _ = ax.hist(eig, bins, density=True, color='silver',
                         edgecolor='none', label='Histogram')
@@ -15,7 +15,7 @@ from scipy.integrate import cumulative_trapezoid
 from scipy.interpolate import PchipInterpolator
 from scipy.stats import qmc
 
-__all__ = ['
+__all__ = ['sample']
 
 
 # =============
@@ -27,68 +27,104 @@ def _quantile_func(x, rho, clamp=1e-4, eps=1e-8):
     Construct a quantile function from evaluations of an estimated density
     on a grid (x, rho(x)).
     """
+
     rho_clamp = rho.copy()
     rho_clamp[rho < clamp] = eps
     cdf = cumulative_trapezoid(rho_clamp, x, initial=0)
     cdf /= cdf[-1]
-
+    cdf_inv = PchipInterpolator(cdf, x, extrapolate=False)
+
+    return cdf_inv
 
 
-#
-#
-#
+# ======
+# sample
+# ======
 
-def
+def sample(x, rho, num_pts, method='qmc', seed=None):
     """
-    Low-discrepancy sampling from
-    Quasi-Monte Carlo.
+    Low-discrepancy sampling from density estimate.
 
     Parameters
     ----------
-    x : numpy.array, shape (n,)
-        Sorted abscissae at which the density has been evaluated.
 
-
+    x : numpy.array
+        Sorted abscissae at which the density has been evaluated. Shape `(n,)`.
+
+    rho : numpy.array
         Density values corresponding to `x`. Must be non-negative and define
         a valid probability density (i.e., integrate to 1 over the support).
+        Shape `(n,)`.
 
     num_pts : int
         Number of sample points to generate from the density estimate.
 
+    method : {``'mc'``, ``'qmc'``}, default= ``'qmc'``
+        Method of drawing samples from uniform distribution:
+
+        * ``'mc'``: Monte Carlo
+        * ``'qmc'``: Quasi Monte Carlo
+
     seed : int, default=None
         Seed for random number generator
 
     Returns
     -------
+
     samples : numpy.array, shape (num_pts,)
         Samples drawn from the estimated density using a one-dimensional Halton
        sequence mapped through the estimated quantile function.
 
     See Also
     --------
-
-
-
+
+    freealg.supp
+    freealg.kde
+
+    Notes
+    -----
+
+    The underlying Quasi-Monte Carlo engine uses ``scipy.stats.qmc.Halton``
+    function for generating low-discrepancy points.
 
     Examples
     --------
+
     .. code-block:: python
+        :emphasize-lines: 8
 
         >>> import numpy
-        >>> from
+        >>> from freealg import sample
+
+        >>> # density of Beta(3,1) on [0,1]
        >>> x = numpy.linspace(0, 1, 200)
-        >>> rho = 3 * x**2
-
+        >>> rho = 3 * x**2
+
+        >>> samples = sample(x, rho, num_pts=1000, method='qmc')
        >>> assert samples.shape == (1000,)
+
        >>> # Empirical mean should be close to 3/4
        >>> numpy.allclose(samples.mean(), 0.75, atol=0.02)
     """
 
-
-    numpy.random.rand(seed)
-
+    rng = numpy.random.default_rng(seed)
     quantile = _quantile_func(x, rho)
-
-
+
+    # Draw from uniform distribution
+    if method == 'mc':
+        u = rng.random(num_pts)
+
+    elif method == 'qmc':
+        try:
+            engine = qmc.Halton(d=1, scramble=True, rng=rng)
+        except TypeError:
+            engine = qmc.Halton(d=1, scramble=True, seed=rng)
+        u = engine.random(num_pts).ravel()
+
+    else:
+        raise NotImplementedError('"method" is invalid.')
+
+    # Draw from distribution by mapping from inverse CDF
     samples = quantile(u)
+
     return samples.ravel()
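The new `sample` function above is inverse-transform sampling: clamp the tabulated density, integrate it into a CDF with `cumulative_trapezoid`, invert the CDF monotonically with `PchipInterpolator`, and push either pseudo-random or Halton low-discrepancy uniforms through that inverse; the try/except around `qmc.Halton` covers the `rng`/`seed` keyword difference between SciPy versions. A condensed standalone sketch of the same pipeline (the helper name and the Beta(3, 1) test density are illustrative, not from the package):

```python
import numpy
from scipy.integrate import cumulative_trapezoid
from scipy.interpolate import PchipInterpolator
from scipy.stats import qmc

def sample_from_density(x, rho, num_pts, method='qmc', seed=None):
    # inverse CDF (quantile function) built from the tabulated density
    rho_clamp = rho.copy()
    rho_clamp[rho < 1e-4] = 1e-8          # clamp near-zero density, as in the hunk above
    cdf = cumulative_trapezoid(rho_clamp, x, initial=0)
    cdf /= cdf[-1]
    quantile = PchipInterpolator(cdf, x, extrapolate=False)

    rng = numpy.random.default_rng(seed)
    if method == 'mc':
        u = rng.random(num_pts)
    elif method == 'qmc':
        try:
            engine = qmc.Halton(d=1, scramble=True, rng=rng)   # SciPy with 'rng' keyword
        except TypeError:
            engine = qmc.Halton(d=1, scramble=True, seed=rng)  # fall back to 'seed'
        u = engine.random(num_pts).ravel()
    else:
        raise NotImplementedError('"method" is invalid.')

    # map uniforms through the inverse CDF
    return quantile(u)

# Beta(3, 1) density on [0, 1]; the empirical mean should be close to 3/4
x = numpy.linspace(0, 1, 200)
samples = sample_from_density(x, 3 * x**2, num_pts=1000, seed=0)
print(samples.mean())
```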