freealg 0.1.12__tar.gz → 0.1.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. {freealg-0.1.12 → freealg-0.1.14}/PKG-INFO +17 -7
  2. {freealg-0.1.12 → freealg-0.1.14}/README.rst +16 -6
  3. {freealg-0.1.12 → freealg-0.1.14}/freealg/__init__.py +2 -1
  4. freealg-0.1.14/freealg/__version__.py +1 -0
  5. {freealg-0.1.12 → freealg-0.1.14}/freealg/_chebyshev.py +4 -5
  6. freealg-0.1.14/freealg/_decompress.py +356 -0
  7. {freealg-0.1.12 → freealg-0.1.14}/freealg/_pade.py +23 -13
  8. {freealg-0.1.12 → freealg-0.1.14}/freealg/_plot_util.py +6 -3
  9. freealg-0.1.14/freealg/_support.py +186 -0
  10. {freealg-0.1.12 → freealg-0.1.14}/freealg/distributions/_kesten_mckay.py +13 -5
  11. {freealg-0.1.12 → freealg-0.1.14}/freealg/distributions/_marchenko_pastur.py +10 -2
  12. {freealg-0.1.12 → freealg-0.1.14}/freealg/distributions/_meixner.py +10 -2
  13. {freealg-0.1.12 → freealg-0.1.14}/freealg/distributions/_wachter.py +10 -2
  14. {freealg-0.1.12 → freealg-0.1.14}/freealg/distributions/_wigner.py +10 -2
  15. freealg-0.1.14/freealg/eigfree.py +120 -0
  16. {freealg-0.1.12 → freealg-0.1.14}/freealg/freeform.py +84 -60
  17. {freealg-0.1.12 → freealg-0.1.14}/freealg.egg-info/PKG-INFO +17 -7
  18. {freealg-0.1.12 → freealg-0.1.14}/freealg.egg-info/SOURCES.txt +1 -0
  19. freealg-0.1.12/freealg/__version__.py +0 -1
  20. freealg-0.1.12/freealg/_decompress.py +0 -180
  21. freealg-0.1.12/freealg/_support.py +0 -85
  22. {freealg-0.1.12 → freealg-0.1.14}/AUTHORS.txt +0 -0
  23. {freealg-0.1.12 → freealg-0.1.14}/CHANGELOG.rst +0 -0
  24. {freealg-0.1.12 → freealg-0.1.14}/LICENSE.txt +0 -0
  25. {freealg-0.1.12 → freealg-0.1.14}/MANIFEST.in +0 -0
  26. {freealg-0.1.12 → freealg-0.1.14}/freealg/_damp.py +0 -0
  27. {freealg-0.1.12 → freealg-0.1.14}/freealg/_jacobi.py +0 -0
  28. {freealg-0.1.12 → freealg-0.1.14}/freealg/_sample.py +0 -0
  29. {freealg-0.1.12 → freealg-0.1.14}/freealg/_util.py +0 -0
  30. {freealg-0.1.12 → freealg-0.1.14}/freealg/distributions/__init__.py +0 -0
  31. {freealg-0.1.12 → freealg-0.1.14}/freealg.egg-info/dependency_links.txt +0 -0
  32. {freealg-0.1.12 → freealg-0.1.14}/freealg.egg-info/not-zip-safe +0 -0
  33. {freealg-0.1.12 → freealg-0.1.14}/freealg.egg-info/requires.txt +0 -0
  34. {freealg-0.1.12 → freealg-0.1.14}/freealg.egg-info/top_level.txt +0 -0
  35. {freealg-0.1.12 → freealg-0.1.14}/pyproject.toml +0 -0
  36. {freealg-0.1.12 → freealg-0.1.14}/requirements.txt +0 -0
  37. {freealg-0.1.12 → freealg-0.1.14}/setup.cfg +0 -0
  38. {freealg-0.1.12 → freealg-0.1.14}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: freealg
3
- Version: 0.1.12
3
+ Version: 0.1.14
4
4
  Summary: Free probability for large matrices
5
5
  Keywords: leaderboard bot chat
6
6
  Platform: Linux
@@ -70,6 +70,12 @@ Dynamic: summary
70
70
  :width: 240
71
71
  :class: custom-dark
72
72
 
73
+ `Paper <https://arxiv.org/abs/2506.11994>`__ |
74
+ `Slides <https://www.dropbox.com/scl/fi/03gjuyz17k9yhsqy0isoz/free_decomporession_slides.pdf?rlkey=8f82mhciyl2ju02l7hv1md5li&st=26xmhjga&dl=0>`__ |
75
+ `Docs <https://ameli.github.io/freealg>`__
76
+
77
+ .. `Slides <https://ameli.github.io/freealg/_static/data/slides.pdf>`__ |
78
+
73
79
  *freealg* is a Python package that employs **free** probability to evaluate the spectral
74
80
  densities of large matrix **form**\ s. The fundamental algorithm employed by *freealg* is
75
81
  **free decompression**, which extrapolates from the empirical spectral densities of small
@@ -140,15 +146,19 @@ requests and bug reports.
140
146
  How to Cite
141
147
  ===========
142
148
 
143
- If you use this work, please cite the `arXiv paper <https://arxiv.org/abs/2506.11994>`.
149
+ If you use this work, please cite the `arXiv paper <https://arxiv.org/abs/2506.11994>`__.
144
150
 
145
151
  .. code::
146
152
 
147
- @article{ameli2025spectral,
148
- title={Spectral Estimation with Free Decompression},
149
- author={Siavash Ameli and Chris van der Heide and Liam Hodgkinson and Michael W. Mahoney},
150
- journal={arXiv preprint arXiv:2506.11994},
151
- year={2025}
153
+ @article{spectral2025,
154
+ title={Spectral Estimation with Free Decompression},
155
+ author={Siavash Ameli and Chris van der Heide and Liam Hodgkinson and Michael W. Mahoney},
156
+ year={2025},
157
+ eprint={2506.11994},
158
+ archivePrefix={arXiv},
159
+ primaryClass={stat.ML},
160
+ url={https://arxiv.org/abs/2506.11994},
161
+ journal={arXiv preprint arXiv:2506.11994},
152
162
  }
153
163
 
154
164
 
@@ -3,6 +3,12 @@
3
3
  :width: 240
4
4
  :class: custom-dark
5
5
 
6
+ `Paper <https://arxiv.org/abs/2506.11994>`__ |
7
+ `Slides <https://www.dropbox.com/scl/fi/03gjuyz17k9yhsqy0isoz/free_decomporession_slides.pdf?rlkey=8f82mhciyl2ju02l7hv1md5li&st=26xmhjga&dl=0>`__ |
8
+ `Docs <https://ameli.github.io/freealg>`__
9
+
10
+ .. `Slides <https://ameli.github.io/freealg/_static/data/slides.pdf>`__ |
11
+
6
12
  *freealg* is a Python package that employs **free** probability to evaluate the spectral
7
13
  densities of large matrix **form**\ s. The fundamental algorithm employed by *freealg* is
8
14
  **free decompression**, which extrapolates from the empirical spectral densities of small
@@ -73,15 +79,19 @@ requests and bug reports.
73
79
  How to Cite
74
80
  ===========
75
81
 
76
- If you use this work, please cite the `arXiv paper <https://arxiv.org/abs/2506.11994>`.
82
+ If you use this work, please cite the `arXiv paper <https://arxiv.org/abs/2506.11994>`__.
77
83
 
78
84
  .. code::
79
85
 
80
- @article{ameli2025spectral,
81
- title={Spectral Estimation with Free Decompression},
82
- author={Siavash Ameli and Chris van der Heide and Liam Hodgkinson and Michael W. Mahoney},
83
- journal={arXiv preprint arXiv:2506.11994},
84
- year={2025}
86
+ @article{spectral2025,
87
+ title={Spectral Estimation with Free Decompression},
88
+ author={Siavash Ameli and Chris van der Heide and Liam Hodgkinson and Michael W. Mahoney},
89
+ year={2025},
90
+ eprint={2506.11994},
91
+ archivePrefix={arXiv},
92
+ primaryClass={stat.ML},
93
+ url={https://arxiv.org/abs/2506.11994},
94
+ journal={arXiv preprint arXiv:2506.11994},
85
95
  }
86
96
 
87
97
 
@@ -6,7 +6,8 @@
6
6
  # under the terms of the license found in the LICENSE.txt file in the root
7
7
  # directory of this source tree.
8
8
 
9
- from .freeform import FreeForm, eigfree
9
+ from .freeform import FreeForm
10
+ from .eigfree import eigfree
10
11
  from . import distributions
11
12
 
12
13
  __all__ = ['FreeForm', 'distributions', 'eigfree']
@@ -0,0 +1 @@
1
+ __version__ = "0.1.14"
@@ -58,7 +58,6 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
58
58
 
59
59
  # Map to [–1,1] interval
60
60
  t = (2 * eig - (lam_m + lam_p)) / (lam_p - lam_m)
61
- N = eig.size
62
61
 
63
62
  # Inner‐product norm of each U_k under w(t) = sqrt{1–t^2} is \\pi/2
64
63
  norm = numpy.pi / 2
@@ -104,7 +103,7 @@ def chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0):
104
103
 
105
104
  for k in range(K + 1):
106
105
  Pk = eval_chebyu(k, t) # U_k(t) on the grid
107
- moment = numpy.trapezoid(Pk * pdf, xs) # \int U_k(t) \rho(x) dx
106
+ moment = numpy.trapezoid(Pk * pdf, xs) # \int U_k(t) \rho(x) dx
108
107
 
109
108
  if k == 0:
110
109
  penalty = 0
@@ -226,12 +225,12 @@ def chebyshev_stieltjes(z, psi, support):
226
225
  S = wynn_pade(psi_zero, J)
227
226
 
228
227
  # build powers J^(k+1) for k=0..K
229
- #K = len(psi) - 1
228
+ # K = len(psi) - 1
230
229
  # shape: (..., K+1)
231
- #Jpow = J[..., None] ** numpy.arange(1, K+2)
230
+ # Jpow = J[..., None] ** numpy.arange(1, K+2)
232
231
 
233
232
  # sum psi_k * J^(k+1)
234
- #S = numpy.sum(psi * Jpow, axis=-1)
233
+ # S = numpy.sum(psi * Jpow, axis=-1)
235
234
 
236
235
  # assemble m(z)
237
236
  m_z = -2 / span * numpy.pi * S
@@ -0,0 +1,356 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # SPDX-FileType: SOURCE
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify it under
5
+ # the terms of the license found in the LICENSE.txt file in the root directory
6
+ # of this source tree.
7
+
8
+
9
+ # =======
10
+ # Imports
11
+ # =======
12
+
13
+ import numpy
14
+
15
+ __all__ = ['decompress', 'reverse_characteristics']
16
+
17
+
18
+ # =============
19
+ # secant method
20
+ # =============
21
+
22
+
23
+ def secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
24
+ alpha=0.5, max_bt=2, eps=1e-30, step_factor=5.0,
25
+ post_smooth=True, jump_tol=10.0, verbose=False):
26
+ """
27
+ Solves :math:`f(z) = a` for many starting points simultaneously
28
+ using the secant method in the complex plane.
29
+
30
+ Parameters
31
+ ----------
32
+ f : callable
33
+ Function that accepts and returns complex `ndarray`s.
34
+
35
+ z0, z1 : array_like
36
+ Two initial guesses. ``z1`` may be broadcast to ``z0``.
37
+
38
+ a : complex or array_like, optional
39
+ Right‑hand‑side targets (broadcasted to ``z0``). Defaults to ``0+0j``.
40
+
41
+ tol : float, optional
42
+ Convergence criterion on ``|f(z) - a|``. Defaults to ``1e-12``.
43
+
44
+ max_iter : int, optional
45
+ Maximum number of secant iterations. Defaults to ``100``.
46
+
47
+ alpha : float, optional
48
+ Back‑tracking shrink factor (``0 < alpha < 1``). Defaults to ``0.5``.
49
+
50
+ max_bt : int, optional
51
+ Maximum back‑tracking trials per iteration. Defaults to ``2``.
52
+
53
+ eps : float, optional
54
+ Safeguard added to tiny denominators. Defaults to ``1e-30``.
55
+
+ step_factor : float, optional
+ Caps each secant update at ``step_factor`` times the previous
+ step size, preventing runaway steps. Defaults to ``5.0``.
+
56
+ post_smooth : bool, optional
57
+ If True (default) run a single vectorised clean-up pass that
58
+ re-solves points whose final root differs from the *nearest*
59
+ neighbour by more than ``jump_tol`` times the local median jump.
60
+
61
+ jump_tol : float, optional
62
+ Sensitivity of the clean-up pass; larger tolerance implies fewer
63
+ re-solves.
64
+
65
+ verbose : bool, optional
66
+ If *True*, prints progress every 10 iterations.
67
+
68
+ Returns
69
+ -------
70
+ roots : ndarray
71
+ Estimated roots, shaped like the broadcast inputs.
72
+ residuals : ndarray
73
+ Final residuals ``|f(root) - a|``.
74
+ iterations : ndarray
75
+ Iteration count for each point.
76
+ """
77
+
78
+ # Broadcast inputs
79
+ z0, z1, a = numpy.broadcast_arrays(
80
+ numpy.asarray(z0, numpy.complex128),
81
+ numpy.asarray(z1, numpy.complex128),
82
+ numpy.asarray(a, numpy.complex128),
83
+ )
84
+ orig_shape = z0.shape
85
+ z0, z1, a = (x.ravel() for x in (z0, z1, a))
86
+
87
+ n_points = z0.size
88
+ roots = z1.copy()
89
+ iterations = numpy.zeros(n_points, dtype=int)
90
+
91
+ f0 = f(z0) - a
92
+ f1 = f(z1) - a
93
+ residuals = numpy.abs(f1)
94
+ converged = residuals < tol
95
+
96
+ # Entering main loop
97
+ for k in range(max_iter):
98
+ active = ~converged
99
+ if not active.any():
100
+ break
101
+
102
+ # Secant step
103
+ denom = f1 - f0
104
+ denom = numpy.where(numpy.abs(denom) < eps, denom + eps, denom)
105
+ dz = (z1 - z0) * f1 / denom
106
+
107
+ # Step-size limiter
108
+ prev_step = numpy.maximum(numpy.abs(z1 - z0), eps)
109
+ max_step = step_factor * prev_step
110
+ big = numpy.abs(dz) > max_step
111
+ dz[big] *= max_step[big] / numpy.abs(dz[big])
112
+
113
+ z2 = z1 - dz
114
+ f2 = f(z2) - a
115
+
116
+ # Line search by backtracking
117
+ worse = (numpy.abs(f2) >= numpy.abs(f1)) & active
118
+ if worse.any():
119
+ shrink = numpy.ones_like(dz)
120
+ for _ in range(max_bt):
121
+ shrink[worse] *= alpha
122
+ z_try = z1[worse] - shrink[worse] * dz[worse]
123
+ f_try = f(z_try) - a[worse]
124
+
125
+ improved = numpy.abs(f_try) < numpy.abs(f1[worse])
126
+ if not improved.any():
127
+ continue
128
+
129
+ idx = numpy.flatnonzero(worse)[improved]
130
+ z2[idx], f2[idx] = z_try[improved], f_try[improved]
131
+ worse[idx] = False
132
+ if not worse.any():
133
+ break
134
+
135
+ # Book‑keeping
136
+ newly_conv = (numpy.abs(f2) < tol) & active
137
+ converged[newly_conv] = True
138
+ iterations[newly_conv] = k + 1
139
+ roots[newly_conv] = z2[newly_conv]
140
+ residuals[newly_conv] = numpy.abs(f2[newly_conv])
141
+
142
+ still = active & ~newly_conv
143
+ z0[still], z1[still] = z1[still], z2[still]
144
+ f0[still], f1[still] = f1[still], f2[still]
145
+
146
+ if verbose and k % 10 == 0:
147
+ print(f"Iter {k}: {converged.sum()} / {n_points} converged")
148
+
149
+ # Non‑converged points
150
+ remaining = ~converged
151
+ roots[remaining] = z1[remaining]
152
+ residuals[remaining] = numpy.abs(f1[remaining])
153
+ iterations[remaining] = max_iter
154
+
155
+ # Optional clean-up pass
156
+ if post_smooth and n_points > 2:
157
+ # absolute jump to *nearest* neighbour (left or right)
158
+ diff_left = numpy.empty_like(roots)
159
+ diff_right = numpy.empty_like(roots)
160
+ diff_left[1:] = numpy.abs(roots[1:] - roots[:-1])
161
+ diff_right[:-1] = numpy.abs(roots[:-1] - roots[1:])
162
+ jump = numpy.minimum(diff_left, diff_right)
163
+
164
+ # ignore unconverged points
165
+ median_jump = numpy.median(jump[~remaining])
166
+ bad = (jump > jump_tol * median_jump) & ~remaining
167
+
168
+ if bad.any():
169
+ z_first_all = numpy.where(bad & (diff_left <= diff_right),
170
+ roots - diff_left,
171
+ roots + diff_right)
172
+
173
+ # keep only the offending indices
174
+ z_first = z_first_all[bad]
175
+ z_second = z_first + (roots[bad] - z_first) * 1e-2
176
+
177
+ # re-solve just the outliers in one vector call
178
+ new_root, new_res, new_iter = secant_complex(
179
+ f, z_first, z_second, a[bad],
180
+ tol=tol, max_iter=max_iter,
181
+ alpha=alpha, max_bt=max_bt,
182
+ eps=eps, step_factor=step_factor,
183
+ post_smooth=False, # avoid recursion
184
+ )
185
+ roots[bad] = new_root
186
+ residuals[bad] = new_res
187
+ iterations[bad] = iterations[bad] + new_iter
188
+
189
+ if verbose:
190
+ print(f"Clean-up: re-solved {bad.sum()} outliers")
191
+
192
+ return (
193
+ roots.reshape(orig_shape),
194
+ residuals.reshape(orig_shape),
195
+ iterations.reshape(orig_shape),
196
+ )
197
+
198
+
199
+ # ==========
200
+ # decompress
201
+ # ==========
202
+
203
+
204
+ def decompress(freeform, size, x=None, delta=1e-4, max_iter=500,
205
+ tolerance=1e-8):
206
+ """
207
+ Free decompression of spectral density.
208
+
209
+ Parameters
210
+ ----------
211
+
212
+ freeform : FreeForm
213
+ The initial freeform object of matrix to be decompressed
214
+
215
+ size : int
216
+ Size of the decompressed matrix.
217
+
218
+ x : numpy.array, default=None
219
+ Positions where density to be evaluated at. If `None`, an interval
220
+ slightly larger than the support interval will be used.
221
+
222
+ delta: float, default=1e-4
223
+ Size of the perturbation into the upper half plane for Plemelj's
224
+ formula.
225
+
226
+ max_iter: int, default=500
227
+ Maximum number of secant method iterations.
228
+
229
+ tolerance: float, default=1e-8
230
+ Tolerance for the solution obtained by the secant method solver.
231
+
232
+ Returns
233
+ -------
234
+
235
+ rho : numpy.array
236
+ Spectral density
237
+
238
+ See Also
239
+ --------
240
+
241
+ density
242
+ stieltjes
243
+
244
+ Notes
245
+ -----
246
+
247
+ Work in progress.
248
+
249
+ References
250
+ ----------
251
+
252
+ .. [1] tbd
253
+
254
+ Examples
255
+ --------
256
+
257
+ .. code-block:: python
258
+
259
+ >>> from freealg import FreeForm
260
+ """
261
+
262
+ alpha = size / freeform.n
263
+ m = freeform._eval_stieltjes
264
+ # Lower and upper bound on new support
265
+ hilb_lb = (1 / m(freeform.lam_m + delta * 1j)).real
266
+ hilb_ub = (1 / m(freeform.lam_p + delta * 1j)).real
267
+ lb = freeform.lam_m - (alpha - 1) * hilb_lb
268
+ ub = freeform.lam_p - (alpha - 1) * hilb_ub
269
+
270
+ # Create x if not given
271
+ on_grid = (x is None)
272
+ if on_grid:
273
+ radius = 0.5 * (ub - lb)
274
+ center = 0.5 * (ub + lb)
275
+ scale = 1.25
276
+ x_min = numpy.floor(center - radius * scale)
277
+ x_max = numpy.ceil(center + radius * scale)
278
+ x = numpy.linspace(x_min, x_max, 500)
279
+ else:
280
+ x = numpy.asarray(x)
281
+
282
+ target = x + delta * 1j
283
+ if numpy.isclose(alpha, 1.0):
284
+ return freeform.density(x), x, freeform.support
285
+
286
+ # Characteristic curve map
287
+ def _char_z(z):
288
+ return z + (1 / m(z)) * (1 - alpha)
289
+
290
+ z0 = numpy.full(target.shape, numpy.mean(freeform.support) + .1j,
291
+ dtype=numpy.complex128)
292
+ z1 = z0 - .2j
293
+
294
+ roots, _, _ = secant_complex(
295
+ _char_z, z0, z1,
296
+ a=target,
297
+ tol=tolerance,
298
+ max_iter=max_iter
299
+ )
300
+
301
+ # Plemelj's formula
302
+ z = roots
303
+ char_s = m(z) / alpha
304
+ rho = numpy.maximum(0, char_s.imag / numpy.pi)
305
+ rho[numpy.isnan(rho) | numpy.isinf(rho)] = 0
306
+ if on_grid:
307
+ x, rho = x.ravel(), rho.ravel()
308
+ # dx = x[1] - x[0]
309
+ # left_idx, right_idx = support_from_density(dx, rho)
310
+ # x, rho = x[left_idx-1:right_idx+1], rho[left_idx-1:right_idx+1]
311
+ rho = rho / numpy.trapezoid(rho, x)
312
+
313
+ return rho.reshape(*x.shape), x, (lb, ub)
314
+
315
+
316
+ # =======================
317
+ # reverse characteristics
318
+ # =======================
319
+
320
+ def reverse_characteristics(freeform, z_inits, T, iterations=500,
321
+ step_size=0.1, tolerance=1e-8):
322
+ """
323
+ """
324
+
325
+ t_span = (0, T)
326
+ t_eval = numpy.linspace(t_span[0], t_span[1], 50)
327
+
328
+ m = freeform._eval_stieltjes
329
+
330
+ def _char_z(z, t):
331
+ return z + (1 / m(z)) * (1 - numpy.exp(t))
332
+
333
+ target_z, target_t = numpy.meshgrid(z_inits, t_eval)
334
+
335
+ z = numpy.full(target_z.shape, numpy.mean(freeform.support) - .1j,
336
+ dtype=numpy.complex128)
337
+
338
+ # Broken Newton steps can produce a lot of warnings. Removing them for now.
339
+ with numpy.errstate(all='ignore'):
340
+ for _ in range(iterations):
341
+ objective = _char_z(z, target_t) - target_z
342
+ mask = numpy.abs(objective) >= tolerance
343
+ if not numpy.any(mask):
344
+ break
345
+ z_m = z[mask]
346
+ t_m = target_t[mask]
347
+
348
+ # Perform finite difference approximation
349
+ dfdz = _char_z(z_m+tolerance, t_m) - _char_z(z_m-tolerance, t_m)
350
+ dfdz /= 2*tolerance
351
+ dfdz[dfdz == 0] = 1.0
352
+
353
+ # Perform Newton step
354
+ z[mask] = z_m - step_size * objective[mask] / dfdz
355
+
356
+ return z
@@ -236,9 +236,10 @@ def _eval_rational(z, c, D, poles, resid):
236
236
 
237
237
  return c + D * z + term
238
238
 
239
- # ========
240
- # Wynn epsilon algorithm for Pade
241
- # ========
239
+
240
+ # =========
241
+ # Wynn pade
242
+ # =========
242
243
 
243
244
  @numba.jit(nopython=True, parallel=True)
244
245
  def wynn_pade(coeffs, x):
@@ -248,48 +249,57 @@ def wynn_pade(coeffs, x):
248
249
  returns a function handle that computes the Pade approximant at any x
249
250
  using Wynn's epsilon algorithm.
250
251
 
251
- Parameters:
252
- coeffs (list or array): Coefficients [a0, a1, a2, ...] of the power series.
252
+ Parameters
253
+ ----------
254
+
255
+ coeffs (list or array):
256
+ Coefficients [a0, a1, a2, ...] of the power series.
253
257
 
254
- Returns:
255
- function: A function approximant(x) that returns the approximated value f(x).
258
+ Returns
259
+ -------
260
+
261
+ function:
262
+ A function approximant(x) that returns the approximated value f(x).
256
263
  """
264
+
257
265
  # Number of coefficients
258
266
  xn = x.ravel()
259
267
  d = len(xn)
260
268
  N = len(coeffs)
261
-
269
+
262
270
  # Compute the partial sums s_n = sum_{i=0}^n a_i * x^i for n=0,...,N-1
263
271
  eps = numpy.zeros((N+1, N, d), dtype=numpy.complex128)
264
272
  for i in numba.prange(d):
265
273
  partial_sum = 0.0
266
274
  for n in range(N):
267
275
  partial_sum += coeffs[n] * (xn[i] ** n)
268
- eps[0,n,i] = partial_sum
276
+ eps[0, n, i] = partial_sum
269
277
 
270
278
  for i in numba.prange(d):
271
279
  for k in range(1, N+1):
272
280
  for j in range(N - k):
273
- delta = eps[k-1, j+1,i] - eps[k-1, j,i]
281
+ delta = eps[k-1, j+1, i] - eps[k-1, j, i]
274
282
  if delta == 0:
275
283
  rec_delta = numpy.inf
276
284
  elif numpy.isinf(delta) or numpy.isnan(delta):
277
285
  rec_delta = 0.0
278
286
  else:
279
287
  rec_delta = 1.0 / delta
280
- eps[k,j,i] = rec_delta
288
+ eps[k, j, i] = rec_delta
281
289
  if k > 1:
282
- eps[k,j,i] += eps[k-2,j+1,i]
290
+ eps[k, j, i] += eps[k-2, j+1, i]
283
291
 
284
292
  if (N % 2) == 0:
285
293
  N -= 1
286
-
294
+
287
295
  return eps[N-1, 0, :].reshape(x.shape)
288
296
 
297
+
289
298
  # ========
290
299
  # fit pade
291
300
  # ========
292
301
 
302
+
293
303
  def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
294
304
  safety=1.0, max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls',
295
305
  verbose=0):
@@ -139,7 +139,7 @@ def _auto_bins(array, method='scott', factor=5):
139
139
  # ============
140
140
 
141
141
  def plot_density(x, rho, eig=None, support=None, label='',
142
- title='Spectral density', latex=False, save=False):
142
+ title='Spectral Density', latex=False, save=False):
143
143
  """
144
144
  """
145
145
 
@@ -147,8 +147,11 @@ def plot_density(x, rho, eig=None, support=None, label='',
147
147
 
148
148
  fig, ax = plt.subplots(figsize=(6, 2.7))
149
149
 
150
- if (support is not None) and (eig is not None):
151
- lam_m, lam_p = support
150
+ if eig is not None:
151
+ if support is not None:
152
+ lam_m, lam_p = support
153
+ else:
154
+ lam_m, lam_p = min(eig), max(eig)
152
155
  bins = numpy.linspace(lam_m, lam_p, _auto_bins(eig))
153
156
  _ = ax.hist(eig, bins, density=True, color='silver',
154
157
  edgecolor='none', label='Histogram')