freealg 0.0.3__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
freealg/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.0.3"
1
+ __version__ = "0.1.0"
freealg/_pade.py CHANGED
@@ -12,18 +12,342 @@
12
12
  # =======
13
13
 
14
14
  import numpy
15
+ from numpy.linalg import lstsq
15
16
  from itertools import product
16
17
  from scipy.optimize import least_squares, differential_evolution
17
18
 
18
19
  __all__ = ['fit_pade', 'eval_pade']
19
20
 
20
21
 
22
+ # =============
23
+ # default poles
24
+ # =============
25
+
26
+ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
27
+ """
28
+ Generate q real poles outside [lam_m, lam_p].
29
+
30
+ • even q : q/2 on each side (Chebyshev-like layout)
31
+ • odd q : (q+1)/2 on the *left*, (q-1)/2 on the right
32
+ so q=1 => single pole on whichever side `odd_side` says.
33
+
34
+ safety >= 1: at safety == 1 the poles start half an interval away; >1 pushes them
35
+ farther.
36
+ """
37
+
38
+ if q == 0:
39
+ return numpy.empty(0)
40
+
41
+ Delta = 0.5 * (lam_p - lam_m)
42
+
43
+ # Decide how many poles on each side. m_L and m_R determine how many poles
44
+ # to be on the left and right of the support interval.
45
+ if q % 2 == 0:
46
+ m_L = m_R = q // 2
47
+ else:
48
+ if odd_side == 'left':
49
+ m_L = (q + 1) // 2
50
+ m_R = q // 2
51
+ else:
52
+ m_L = q // 2
53
+ m_R = (q + 1) // 2
54
+
55
+ # Chebyshev-extrema offsets (all positive)
56
+ kL = numpy.arange(m_L)
57
+ tL = (2 * kL + 1) * numpy.pi / (2 * m_L)
58
+ offsL = safety * Delta * (1 + numpy.cos(tL))
59
+
60
+ kR = numpy.arange(m_R)
61
+ tR = (2 * kR + 1) * numpy.pi / (2 * m_R + (m_R == 0))
62
+ offsR = safety * Delta * (1 + numpy.cos(tR))
63
+
64
+ left = lam_m - offsL
65
+ right = lam_p + offsR
66
+
67
+ return numpy.sort(numpy.concatenate([left, right]))
68
+
69
+
70
+ # ============
71
+ # encode poles
72
+ # ============
73
+
74
+ def _encode_poles(a, lam_m, lam_p):
75
+ """
76
+ Map real pole a_j → unconstrained s_j,
77
+ so that the default left-of-interval pole stays left.
78
+ """
79
+
80
+ # half-width of the interval
81
+ d = 0.5 * (lam_p - lam_m)
82
+ # if a < lam_m, we want s ≥ 0; if a > lam_p, s < 0
83
+ return numpy.where(
84
+ a < lam_m,
85
+ numpy.log((lam_m - a) / d), # zero at a = lam_m - d
86
+ -numpy.log((a - lam_p) / d) # zero at a = lam_p + d
87
+ )
88
+
89
+
90
+ # ============
91
+ # decode poles
92
+ # ============
93
+
94
+ def _decode_poles(s, lam_m, lam_p):
95
+ """
96
+ Inverse map s_j → real pole a_j outside the interval.
97
+ """
98
+
99
+ d = 0.5 * (lam_p - lam_m)
100
+ return numpy.where(
101
+ s >= 0,
102
+ lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m-d (left)
103
+ lam_p + d * numpy.exp(-s) # maps s=0 to a=lam_p+d (right)
104
+ )
105
+
106
+
107
+ # ========
108
+ # inner ls
109
+ # ========
110
+
111
+ # def _inner_ls(x, f, poles): # TEST
112
+ def _inner_ls(x, f, poles, p=1):
113
+ """
114
+ This is the inner least square (blazing fast).
115
+ """
116
+
117
+ if poles.size == 0 and p == -1:
118
+ return 0.0, 0.0, numpy.empty(0)
119
+
120
+ if poles.size == 0: # q = 0
121
+ # A = numpy.column_stack((numpy.ones_like(x), x))
122
+ cols = [numpy.ones_like(x)] if p >= 0 else []
123
+ if p == 1:
124
+ cols.append(x)
125
+ A = numpy.column_stack(cols)
126
+ # ---
127
+ theta, *_ = lstsq(A, f, rcond=None)
128
+ # c, D = theta # TEST
129
+ if p == -1:
130
+ c = 0.0
131
+ D = 0.0
132
+ resid = numpy.empty(0)
133
+ elif p == 0:
134
+ c = theta[0]
135
+ D = 0.0
136
+ resid = numpy.empty(0)
137
+ else: # p == 1
138
+ c, D = theta
139
+ resid = numpy.empty(0)
140
+ else:
141
+ # phi = 1.0 / (x[:, None] - poles[None, :])
142
+ # # A = numpy.column_stack((numpy.ones_like(x), x, phi)) # TEST
143
+ # # theta, *_ = lstsq(A, f, rcond=None)
144
+ # # c, D, resid = theta[0], theta[1], theta[2:]
145
+ # phi = 1.0 / (x[:, None] - poles[None, :])
146
+ # cols = [numpy.ones_like(x)] if p >= 0 else []
147
+ # if p == 1:
148
+ # cols.append(x)
149
+ # cols.append(phi)
150
+ # A = numpy.column_stack(cols)
151
+ # theta, *_ = lstsq(A, f, rcond=None)
152
+ # if p == -1:
153
+ # c = 0.0
154
+ # D = 0.0
155
+ # resid = theta
156
+ # elif p == 0:
157
+ # c = theta[0]
158
+ # D = 0.0
159
+ # resid = theta[1:]
160
+ # else: # p == 1
161
+ # c = theta[0]
162
+ # D = theta[1]
163
+ # resid = theta[2:]
164
+
165
+ phi = 1.0 / (x[:, None] - poles[None, :])
166
+ cols = [numpy.ones_like(x)] if p >= 0 else []
167
+ if p == 1:
168
+ cols.append(x)
169
+ cols.append(phi)
170
+
171
+ A = numpy.column_stack(cols)
172
+ theta, *_ = lstsq(A, f, rcond=None)
173
+
174
+ if p == -1:
175
+ c, D, resid = 0.0, 0.0, theta
176
+ elif p == 0:
177
+ c, D, resid = theta[0], 0.0, theta[1:]
178
+ else: # p == 1
179
+ c, D, resid = theta[0], theta[1], theta[2:]
180
+
181
+ return c, D, resid
182
+
183
+
184
+ # =============
185
+ # eval rational
186
+ # =============
187
+
188
+ def _eval_rational(z, c, D, poles, resid):
189
+ """
190
+ """
191
+
192
+ # z = z[:, None]
193
+ # if poles.size == 0:
194
+ # term = 0.0
195
+ # else:
196
+ # term = numpy.sum(resid / (z - poles), axis=1)
197
+ #
198
+ # return c + D * z.ravel() + term
199
+
200
+ # ensure z is a 1-D array
201
+ z = numpy.asarray(z)
202
+ z_col = z[:, None]
203
+
204
+ if poles.size == 0:
205
+ term = 0.0
206
+ else:
207
+ term = numpy.sum(resid / (z_col - poles[None, :]), axis=1)
208
+
209
+ return c + D * z + term
210
+
211
+
21
212
  # ========
22
213
  # fit pade
23
214
  # ========
24
215
 
25
- def fit_pade(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf, S=numpy.inf,
26
- B_default=10.0, S_factor=2.0, maxiter_de=200):
216
+ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', safety=1.0,
217
+ max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls', verbose=0):
218
+ """
219
+ This is the outer optimiser.
220
+
221
+ Fits G(x) = (p>=0 ? c : 0) + (p==1 ? D x : 0) + sum r_j/(x - a_j) # TEST
222
+ """
223
+
224
+ # Checks
225
+ if not (odd_side in ['left', 'right']):
226
+ raise ValueError('"odd_side" can only be "left" or "right".')
227
+
228
+ if not (p in [-1, 0, 1]):
229
+ raise ValueError('"pade_p" can only be -1, 0, or 1.')
230
+
231
+ x = numpy.asarray(x, float)
232
+ f = numpy.asarray(f, float)
233
+
234
+ poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)
235
+ # if q == 0: # nothing to optimise
236
+ if q == 0 and p <= 0:
237
+ # c, D, resid = _inner_ls(x, f, poles0) # TEST
238
+ c, D, resid = _inner_ls(x, f, poles0, p)
239
+ pade_sol = {
240
+ 'c': c, 'D': D, 'poles': poles0, 'resid': resid,
241
+ 'outer_iters': 0
242
+ }
243
+
244
+ return pade_sol
245
+
246
+ s0 = _encode_poles(poles0, lam_m, lam_p)
247
+
248
+ # --------
249
+ # residual
250
+ # --------
251
+
252
+ # def residual(s): # TEST
253
+ def residual(s, p=p):
254
+ poles = _decode_poles(s, lam_m, lam_p)
255
+ # c, D, resid = _inner_ls(x, f, poles) # TEST
256
+ c, D, resid = _inner_ls(x, f, poles, p)
257
+ return _eval_rational(x, c, D, poles, resid) - f
258
+
259
+ # ----------------
260
+
261
+ # Optimizer
262
+ if optimizer == 'ls':
263
+ # scale = numpy.maximum(1.0, numpy.abs(s0))
264
+ res = least_squares(residual, s0,
265
+ method='trf',
266
+ # method='lm',
267
+ # x_scale=scale,
268
+ max_nfev=max_outer, xtol=xtol, ftol=ftol,
269
+ verbose=verbose)
270
+
271
+ elif optimizer == 'de':
272
+
273
+ # Bounds
274
+ # span = lam_p - lam_m
275
+ # B = 3.0 # multiples of span
276
+ # L = numpy.log(B * span)
277
+ # bounds = [(-L, L)] * len(s0)
278
+
279
+ d = 0.5*(lam_p - lam_m)
280
+ # the minimum factor so that lam_m - d*exp(s)=0 is exp(s)=lam_m/d
281
+ min_factor = lam_m/d
282
+ B = max(10.0, min_factor*10.0)
283
+ L = numpy.log(B)
284
+ bounds = [(-L, L)] * len(s0)
285
+
286
+ # Global stage
287
+ glob = differential_evolution(lambda s: numpy.sum(residual(s)**2),
288
+ bounds, maxiter=50, popsize=10,
289
+ polish=False)
290
+
291
+ # local polish
292
+ res = least_squares(
293
+ residual, glob.x,
294
+ method='lm',
295
+ max_nfev=max_outer, xtol=xtol, ftol=ftol,
296
+ verbose=verbose)
297
+
298
+ else:
299
+ raise RuntimeError('"optimizer" is invalid.')
300
+
301
+ poles = _decode_poles(res.x, lam_m, lam_p)
302
+ # c, D, resid = _inner_ls(x, f, poles) # TEST
303
+ c, D, resid = _inner_ls(x, f, poles, p)
304
+
305
+ pade_sol = {
306
+ 'c': c, 'D': D, 'poles': poles, 'resid': resid,
307
+ 'outer_iters': res.nfev
308
+ }
309
+
310
+ return pade_sol
311
+
312
+
313
+ # =========
314
+ # eval pade
315
+ # =========
316
+
317
+ def eval_pade(z, pade_sol):
318
+ """
319
+ """
320
+
321
+ # z_arr = numpy.asanyarray(z) # shape=(M,N)
322
+ # flat = z_arr.ravel() # shape=(M·N,)
323
+ # c, D = pade_sol['c'], pade_sol['D']
324
+ # poles = pade_sol['poles']
325
+ # resid = pade_sol['resid']
326
+ #
327
+ # # _eval_rational takes a 1-D array of z's and returns 1-D outputs
328
+ # flat_out = _eval_rational(flat, c, D, poles, resid)
329
+ #
330
+ # # restore the original shape
331
+ # out = flat_out.reshape(z_arr.shape) # shape=(M,N)
332
+ #
333
+ # return out
334
+
335
+ z = numpy.asanyarray(z) # complex or real, any shape
336
+ c, D = pade_sol['c'], pade_sol['D']
337
+ poles, resid = pade_sol['poles'], pade_sol['resid']
338
+
339
+ out = c + D*z
340
+ for bj, rj in zip(poles, resid):
341
+ out += rj/(z - bj) # each is an (N,) op, no N×q temp
342
+ return out
343
+
344
+
345
+ # ============
346
+ # fit pade old
347
+ # ============
348
+
349
+ def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
350
+ S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
27
351
  """
28
352
  Fit a [p/q] rational P/Q of the form:
29
353
  P(x) = s * prod_{i=0..p-1}(x - a_i)
@@ -125,11 +449,11 @@ def fit_pade(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf, S=numpy.inf,
125
449
  }
126
450
 
127
451
 
128
- # =========
129
- # eval pade
130
- # =========
452
+ # =============
453
+ # eval pade old
454
+ # =============
131
455
 
132
- def eval_pade(z, s, a, b):
456
+ def eval_pade_old(z, s, a, b):
133
457
  """
134
458
  """
135
459
 
freealg/_plot_util.py CHANGED
@@ -23,34 +23,6 @@ __all__ = ['plot_fit', 'plot_density', 'plot_hilbert', 'plot_stieltjes',
23
23
  'plot_stieltjes_on_disk']
24
24
 
25
25
 
26
- # ==============
27
- # plot coeff fit
28
- # ==============
29
-
30
- def plot_coeff_fit(psi, latex=False, save=False):
31
- """
32
- """
33
-
34
- with texplot.theme(use_latex=latex):
35
-
36
- fig, ax = plt.subplots(figsize=(6, 2.7))
37
-
38
- # Save
39
- if save is False:
40
- save_status = False
41
- save_filename = ''
42
- else:
43
- save_status = True
44
- if isinstance(save, str):
45
- save_filename = save
46
- else:
47
- save_filename = 'energy.pdf'
48
-
49
- texplot.show_or_save_plot(plt, default_filename=save_filename,
50
- transparent_background=True, dpi=400,
51
- show_and_save=save_status, verbose=True)
52
-
53
-
54
26
  # ========
55
27
  # plot fit
56
28
  # ========
@@ -400,6 +372,12 @@ def plot_stieltjes_on_disk(r, t, m1_D, m2_D, support, latex=False, save=False):
400
372
  lam_m_z = (lam_m - 1j) / (lam_m + 1j)
401
373
  theta_p = numpy.angle(lam_p_z)
402
374
  theta_n = numpy.angle(lam_m_z)
375
+
376
+ if theta_n < 0:
377
+ theta_n += 2.0 * numpy.pi
378
+ if theta_p < 0:
379
+ theta_p += 2.0 * numpy.pi
380
+
403
381
  theta_branch = numpy.linspace(theta_n, theta_p, 100)
404
382
  theta_alt_branch = numpy.linspace(theta_p, theta_n + 2*numpy.pi, 100)
405
383
  r_branch = numpy.ones_like(theta_branch)
freealg/_sample.py ADDED
@@ -0,0 +1,85 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # SPDX-FileType: SOURCE
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify it under
5
+ # the terms of the license found in the LICENSE.txt file in the root directory
6
+ # of this source tree.
7
+
8
+
9
+ # =======
10
+ # Imports
11
+ # =======
12
+
13
+ from scipy.integrate import cumulative_trapezoid
14
+ from scipy.interpolate import interp1d
15
+ from scipy.stats import qmc
16
+
17
+ __all__ = ['qmc_sample']
18
+
19
+
20
+ # =============
21
+ # quantile func
22
+ # =============
23
+
24
+ def _quantile_func(x, rho):
25
+ """
26
+ Construct a quantile function from evaluations of an estimated density
27
+ on a grid (x, rho(x)).
28
+ """
29
+ cdf = cumulative_trapezoid(rho, x, initial=0)
30
+ cdf /= cdf[-1]
31
+ return interp1d(cdf, x, bounds_error=False, assume_sorted=True)
32
+
33
+
34
+ # ==========
35
+ # qmc sample
36
+ # ==========
37
+
38
+ def qmc_sample(x, rho, num_pts):
39
+ """
40
+ Low-discrepancy sampling from a univariate density estimate using
41
+ Quasi-Monte Carlo.
42
+
43
+ Parameters
44
+ ----------
45
+ x : numpy.array, shape (n,)
46
+ Sorted abscissae at which the density has been evaluated.
47
+
48
+ rho : numpy.array, shape (n,)
49
+ Density values corresponding to `x`. Must be non-negative and define
50
+ a valid probability density (i.e., integrate to 1 over the support).
51
+
52
+ num_pts : int
53
+ Number of sample points to generate from the density estimate.
54
+
55
+ Returns
56
+ -------
57
+ samples : numpy.array, shape (num_pts,)
58
+ Samples drawn from the estimated density using a one-dimensional Halton
59
+ sequence mapped through the estimated quantile function.
60
+
61
+ See Also
62
+ --------
63
+ scipy.stats.qmc.Halton
64
+ Underlying Quasi-Monte Carlo engine used for generating low-discrepancy
65
+ points.
66
+
67
+ Examples
68
+ --------
69
+ .. code-block:: python
70
+
71
+ >>> import numpy
72
+ >>> from freealg._sample import qmc_sample
73
+ >>> x = numpy.linspace(0, 1, 200)
74
+ >>> rho = 3 * x**2 # density of Beta(3,1) on [0,1]
75
+ >>> samples = qmc_sample(x, rho, num_pts=1000)
76
+ >>> assert samples.shape == (1000,)
77
+ >>> # Empirical mean should be close to 3/4
78
+ >>> numpy.allclose(samples.mean(), 0.75, atol=0.02)
79
+ """
80
+
81
+ quantile = _quantile_func(x, rho)
82
+ engine = qmc.Halton(d=1)
83
+ u = engine.random(num_pts)
84
+ samples = quantile(u)
85
+ return samples.ravel()
@@ -7,10 +7,10 @@
7
7
  # directory of this source tree.
8
8
 
9
9
  from .marchenko_pastur import MarchenkoPastur
10
- # from .wigner import Wigner
11
- # from .kesten_mckay import KestenMcKay
12
- # from .wachter import Wachter
10
+ from .wigner import Wigner
11
+ from .kesten_mckay import KestenMcKay
12
+ from .wachter import Wachter
13
13
  # from .meixner import meixner
14
14
 
15
15
  # __all__ = ['MarchenkoPastur', 'Wigner', 'KestenMcKay', 'Wachter', 'Meixner']
16
- __all__ = ['MarchenkoPastur']
16
+ __all__ = ['MarchenkoPastur', 'Wigner', 'KestenMcKay', 'Wachter']