freealg 0.5.3__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
freealg/__init__.py CHANGED
@@ -8,9 +8,12 @@
8
8
 
9
9
  from .freeform import FreeForm
10
10
  from ._linalg import eigvalsh, cond, norm, trace, slogdet
11
+ from ._support import supp
12
+ from ._sample import sample
13
+ from ._util import kde
11
14
  from . import distributions
12
15
 
13
16
  __all__ = ['FreeForm', 'distributions', 'eigvalsh', 'cond', 'norm', 'trace',
14
- 'slogdet']
17
+ 'slogdet', 'supp', 'sample', 'kde']
15
18
 
16
19
  from .__version__ import __version__ # noqa: F401 E402
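Version 0.6.0 promotes supp, sample, and kde to the top-level namespace. A minimal usage sketch of the new public API (my own illustration, not part of the diff; the toy spectrum, bandwidth h=0.05, and grid size are assumptions):

    import numpy
    from freealg import supp, sample, kde

    eig = numpy.random.default_rng(0).standard_normal(2000)    # toy "eigenvalues"
    lam_m, lam_p = supp(eig, method='asymp')                    # estimate the support
    xs = numpy.linspace(lam_m, lam_p, 400)
    pdf = kde(eig, xs, lam_m, lam_p, h=0.05, kernel='beta')     # smooth the spectrum
    draws = sample(xs, pdf, num_pts=1000, method='qmc')         # resample from the estimate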
freealg/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.5.3"
1
+ __version__ = "0.6.0"
freealg/_chebyshev.py CHANGED
@@ -43,10 +43,10 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
43
43
  The assumed compact support of rho.
44
44
 
45
45
  K : int
46
- Highest ChebyshevII order.
46
+ Highest Chebyshev-II order.
47
47
 
48
48
  reg : float
49
- Tikhonovstyle ridge on each coefficient (defaults to 0).
49
+ Tikhonov-style ridge on each coefficient (defaults to 0).
50
50
 
51
51
  Returns
52
52
  -------
@@ -57,10 +57,10 @@ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
57
57
 
58
58
  lam_m, lam_p = support
59
59
 
60
- # Map to [1,1] interval
60
+ # Map to [-1,1] interval
61
61
  t = (2 * eig - (lam_m + lam_p)) / (lam_p - lam_m)
62
62
 
63
- # Innerproduct norm of each U_k under w(t) = sqrt{1t^2} is \\pi/2
63
+ # Inner-product norm of each U_k under w(t) = sqrt{1-t^2} is \\pi/2
64
64
  norm = numpy.pi / 2
65
65
 
66
66
  psi = numpy.empty(K+1)
@@ -92,12 +92,12 @@ def chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0):
92
92
  Projection of a *continuous* density given on a grid (xs, pdf)
93
93
  onto the Chebyshev-II basis.
94
94
 
95
- xs : 1-D numpy array (original xaxis, not the t-variable)
95
+ xs : 1-D numpy array (original x-axis, not the t-variable)
96
96
  pdf : same shape as xs, integrates to 1 on xs
97
97
  """
98
98
 
99
99
  lam_m, lam_p = support
100
- t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m) # map to [1,1]
100
+ t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m) # map to [-1,1]
101
101
 
102
102
  norm = numpy.pi / 2.0
103
103
  psi = numpy.empty(K + 1)
@@ -140,15 +140,15 @@ def chebyshev_density(x, psi, support):
140
140
  -------
141
141
 
142
142
  rho_x : ndarray, same shape as x
143
- Approximated spectral density on the original xaxis.
143
+ Approximated spectral density on the original x-axis.
144
144
  """
145
145
 
146
146
  lam_m, lam_p = support
147
147
 
148
- # Map to [1,1] interval
148
+ # Map to [-1,1] interval
149
149
  t = (2 * numpy.asarray(x) - (lam_m + lam_p)) / (lam_p - lam_m)
150
150
 
151
- # Weight sqrt{1t^2} (clip for numerical safety)
151
+ # Weight sqrt{1-t^2} (clip for numerical safety)
152
152
  w = numpy.sqrt(numpy.clip(1 - t**2, a_min=0, a_max=None))
153
153
 
154
154
  # Summation approximation
@@ -165,22 +165,23 @@ def chebyshev_density(x, psi, support):
165
165
  # chebyshev stieltjes
166
166
  # ===================
167
167
 
168
- def chebyshev_stieltjes(z, psi, support, continuation='pade'):
168
+ def chebyshev_stieltjes(z, psi, support, continuation='pade',
169
+ dtype=numpy.complex128):
169
170
  """
170
- Compute the Stieltjes transform m(z) for a ChebyshevII expansion
171
+ Compute the Stieltjes transform m(z) for a Chebyshev-II expansion
171
172
 
172
- rho(x) = (2/(lam_p - lam_m)) * sqrt(1t(x)^2) * sum_{k=0}^K psi_k U_k(t(x))
173
+ rho(x) = (2/(lam_p - lam_m)) * sqrt(1-t(x)^2) * sum_{k=0}^K psi_k U_k(t(x))
173
174
 
174
- via the closedform
175
+ via the closed-form
175
176
 
176
- \\int_{-1}^1 U_k(t) sqrt(1t^2)/(u - t) dt = \\pi J(u)^(k+1),
177
+ \\int_{-1}^1 U_k(t) sqrt(1-t^2)/(u - t) dt = \\pi J(u)^(k+1),
177
178
 
178
179
  where
179
180
 
180
- u = (2(zcenter))/span,
181
+ u = (2(z-center))/span,
181
182
  center = (lam_p + lam_m)/2,
182
183
  span = lam_p - lam_m,
183
- J(u) = u sqrt(u^21)
184
+ J(u) = u - sqrt(u^2-1)
184
185
 
185
186
  and then
186
187
 
@@ -193,7 +194,7 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
193
194
  Points in the complex plane.
194
195
 
195
196
  psi : array_like, shape (K+1,)
196
- ChebyshevII coefficients \\psi.
197
+ Chebyshev-II coefficients \\psi.
197
198
 
198
199
  support : tuple
199
200
  The support interval of the original density.
@@ -201,6 +202,9 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
201
202
  continuation : str, default= ``'pade'``
202
203
  Method of analytic continuation.
203
204
 
205
+ dtype : numpy dtype, default=numpy.complex128
206
+ Data type for complex arrays. This might enhance series acceleration.
207
+
204
208
  Returns
205
209
  -------
206
210
 
@@ -208,7 +212,8 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
208
212
  The Stieltjes transform m(z) on the same shape as z.
209
213
  """
210
214
 
211
- z = numpy.asarray(z, dtype=numpy.complex128)
215
+ z = numpy.asarray(z, dtype=dtype)
216
+
212
217
  lam_m, lam_p = support
213
218
  span = lam_p - lam_m
214
219
  center = 0.5 * (lam_m + lam_p)
@@ -236,7 +241,7 @@ def chebyshev_stieltjes(z, psi, support, continuation='pade'):
236
241
 
237
242
  else:
238
243
  # Flatten J before passing to Wynn method.
239
- psi_zero = numpy.concatenate([[0], psi])
244
+ psi_zero = numpy.concatenate([[0.0], psi])
240
245
  Sn = partial_sum(psi_zero, J.ravel(), p=0)
241
246
 
242
247
  if continuation == 'wynn-eps':
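The closed form quoted in the chebyshev_stieltjes docstring above can be verified numerically. A small side check (my own, not from the package) for a point u to the upper-right of the support, using scipy quadrature; the branch comment applies to this particular u only:

    import numpy
    from scipy.special import eval_chebyu
    from scipy.integrate import quad

    k = 3
    u = 1.7 + 0.3j
    J = u - numpy.sqrt(u**2 - 1)          # branch with |J| < 1 for this u (principal sqrt)

    # integrand of \int_{-1}^{1} U_k(t) sqrt(1-t^2) / (u - t) dt, split into real/imag parts
    re = quad(lambda t: (eval_chebyu(k, t) * numpy.sqrt(1 - t**2) / (u - t)).real, -1, 1)[0]
    im = quad(lambda t: (eval_chebyu(k, t) * numpy.sqrt(1 - t**2) / (u - t)).imag, -1, 1)[0]

    print(re + 1j * im)                    # should match the closed form below
    print(numpy.pi * J**(k + 1))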
freealg/_decompress.py CHANGED
@@ -567,7 +567,8 @@ def _newton_method(f, z_init, a, support, enforce_wall=False, tol=1e-4,
567
567
 
568
568
  def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
569
569
  alpha=0.5, max_bt=1, eps=1e-30, step_factor=5.0,
570
- post_smooth=True, jump_tol=10.0, verbose=False):
570
+ post_smooth=True, jump_tol=10.0, dtype=numpy.complex128,
571
+ verbose=False):
571
572
  """
572
573
  Solves :math:``f(z) = a`` for many starting points simultaneously using the
573
574
  secant method in the complex plane.
@@ -581,7 +582,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
581
582
  Two initial guesses. ``z1`` may be broadcast to ``z0``.
582
583
 
583
584
  a : complex or array_like, optional
584
- Righthandside targets (broadcasted to ``z0``). Defaults to ``0+0j``.
585
+ Right-hand-side targets (broadcasted to ``z0``). Defaults to ``0+0j``.
585
586
 
586
587
  tol : float, optional
587
588
  Convergence criterion on ``|f(z) - a|``. Defaults to ``1e-12``.
@@ -590,10 +591,10 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
590
591
  Maximum number of secant iterations. Defaults to ``100``.
591
592
 
592
593
  alpha : float, optional
593
- Backtracking shrink factor (``0 < alpha < 1``). Defaults to ``0.5``.
594
+ Back-tracking shrink factor (``0 < alpha < 1``). Defaults to ``0.5``.
594
595
 
595
596
  max_bt : int, optional
596
- Maximum backtracking trials per iteration. Defaults to ``0``.
597
+ Maximum back-tracking trials per iteration. Defaults to ``0``.
597
598
 
598
599
  eps : float, optional
599
600
  Safeguard added to tiny denominators. Defaults to ``1e-30``.
@@ -607,6 +608,9 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
607
608
  Sensitivity of the clean-up pass; larger tolerance implies fewer
608
609
  re-solves.
609
610
 
611
+ dtype : {``'complex128'``, ``'complex256'``}, default = ``'complex128'``
612
+ Data type for inner computations of complex variables.
613
+
610
614
  verbose : bool, optional
611
615
  If *True*, prints progress every 10 iterations.
612
616
 
@@ -622,9 +626,9 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
622
626
 
623
627
  # Broadcast inputs
624
628
  z0, z1, a = numpy.broadcast_arrays(
625
- numpy.asarray(z0, numpy.complex128),
626
- numpy.asarray(z1, numpy.complex128),
627
- numpy.asarray(a, numpy.complex128),
629
+ numpy.asarray(z0, dtype=dtype),
630
+ numpy.asarray(z1, dtype=dtype),
631
+ numpy.asarray(a, dtype=dtype),
628
632
  )
629
633
  orig_shape = z0.shape
630
634
  z0, z1, a = (x.ravel() for x in (z0, z1, a))
@@ -677,7 +681,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
677
681
  if not worse.any():
678
682
  break
679
683
 
680
- # Bookkeeping
684
+ # Book-keeping
681
685
  newly_conv = (numpy.abs(f2) < tol) & active
682
686
  converged[newly_conv] = True
683
687
  iterations[newly_conv] = k + 1
@@ -691,7 +695,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
691
695
  if verbose and k % 10 == 0:
692
696
  print(f"Iter {k}: {converged.sum()} / {n_points} converged")
693
697
 
694
- # Nonconverged points
698
+ # Non-converged points
695
699
  remaining = ~converged
696
700
  roots[remaining] = z1[remaining]
697
701
  residuals[remaining] = numpy.abs(f1[remaining])
@@ -723,7 +727,7 @@ def _secant_complex(f, z0, z1, a=0+0j, tol=1e-12, max_iter=100,
723
727
  new_root, new_res, new_iter = _secant_complex(
724
728
  f, z_first, z_second, a[bad], tol=tol, max_iter=max_iter,
725
729
  alpha=alpha, max_bt=max_bt, eps=eps, step_factor=step_factor,
726
- post_smooth=False, # avoid recursion
730
+ dtype=dtype, post_smooth=False, # avoid recursion
727
731
  )
728
732
 
729
733
  roots[bad] = new_root
@@ -894,7 +898,7 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
894
898
  # Initialize roots below the real axis
895
899
  if roots_init is None:
896
900
  roots_init = numpy.full(x.shape, numpy.mean(freeform.support) - 0.1j,
897
- dtype=numpy.complex128)
901
+ dtype=freeform.dtype)
898
902
 
899
903
  # Finding roots
900
904
  if method == 'newton':
@@ -925,11 +929,11 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
925
929
 
926
930
  elif method == 'secant':
927
931
  z0 = numpy.full(x.shape, numpy.mean(freeform.support) + 0.1j,
928
- dtype=numpy.complex128)
932
+ dtype=freeform.dtype)
929
933
  z1 = z0 - 0.2j
930
934
 
931
935
  roots, _, _ = _secant_complex(_char_z, z0, z1, a=target, tol=tolerance,
932
- max_iter=max_iter)
936
+ max_iter=max_iter, dtype=freeform.dtype)
933
937
  else:
934
938
  raise NotImplementedError('"method" is invalid.')
935
939
 
@@ -960,7 +964,8 @@ def decompress(freeform, alpha, x, roots_init=None, method='newton',
960
964
  # =======================
961
965
 
962
966
  def reverse_characteristics(freeform, z_inits, T, iterations=500,
963
- step_size=0.1, tolerance=1e-8):
967
+ step_size=0.1, tolerance=1e-8,
968
+ dtype=numpy.complex128):
964
969
  """
965
970
  """
966
971
 
@@ -975,7 +980,7 @@ def reverse_characteristics(freeform, z_inits, T, iterations=500,
975
980
  target_z, target_t = numpy.meshgrid(z_inits, t_eval)
976
981
 
977
982
  z = numpy.full(target_z.shape, numpy.mean(freeform.support) - 0.1j,
978
- dtype=numpy.complex128)
983
+ dtype=dtype)
979
984
 
980
985
  # Broken Newton steps can produce a lot of warnings. Removing them for now.
981
986
  with numpy.errstate(all='ignore'):
freealg/_jacobi.py CHANGED
@@ -144,11 +144,23 @@ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
144
144
  lam_m, lam_p = support
145
145
  t = (2 * x - (lam_p + lam_m)) / (lam_p - lam_m)
146
146
  w = (1 - t)**alpha * (1 + t)**beta
147
+
148
+ # The function eval_jacobi does not accept complex256 type
149
+ down_cast = False
150
+ if numpy.issubdtype(t.dtype, numpy.complexfloating) and \
151
+ t.itemsize > numpy.dtype(numpy.complex128).itemsize:
152
+ t = t.astype(numpy.complex128)
153
+ down_cast = True
154
+
147
155
  P = numpy.vstack([eval_jacobi(k, alpha, beta, t) for k in range(len(psi))])
148
156
 
149
157
  rho_t = w * (psi @ P) # density in t-variable
150
158
  rho_x = rho_t * (2.0 / (lam_p - lam_m)) # back to x-variable
151
159
 
160
+ # Cast up to complex256
161
+ if down_cast:
162
+ rho_x = rho_x.astype(t.dtype)
163
+
152
164
  return rho_x
153
165
 
154
166
 
@@ -157,13 +169,13 @@ def jacobi_density(x, psi, support, alpha=0.0, beta=0.0):
157
169
  # ================
158
170
 
159
171
  def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
160
- continuation='pade'):
172
+ continuation='pade', dtype=numpy.complex128):
161
173
  """
162
174
  Compute m(z) = sum_k psi_k * m_k(z) where
163
175
 
164
176
  m_k(z) = \\int w^{(alpha, beta)}(t) P_k^{(alpha, beta)}(t) / (u(z)-t) dt
165
177
 
166
- Each m_k is evaluated *separately* with a GaussJacobi rule sized
178
+ Each m_k is evaluated *separately* with a Gauss-Jacobi rule sized
167
179
  for that k. This follows the user's request: 1 quadrature rule per P_k.
168
180
 
169
181
  Parameters
@@ -184,6 +196,9 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
184
196
  continuation : str, default= ``'pade'``
185
197
  Method of analytic continuation.
186
198
 
199
+ dtype : numpy dtype, default=numpy.complex128
200
+ Data type for complex arrays. This might enhance series acceleration.
201
+
187
202
  Returns
188
203
  -------
189
204
 
@@ -194,7 +209,7 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
194
209
  Same shape as z
195
210
  """
196
211
 
197
- z = numpy.asarray(z, dtype=numpy.complex128)
212
+ z = numpy.asarray(z, dtype=dtype)
198
213
  lam_minus, lam_plus = support
199
214
  span = lam_plus - lam_minus
200
215
  centre = 0.5 * (lam_plus + lam_minus)
@@ -202,11 +217,11 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
202
217
  # Map z -> u in the standard [-1,1] domain
203
218
  u = (2.0 / span) * (z - centre)
204
219
 
205
- m_total = numpy.zeros_like(z, dtype=numpy.complex128)
220
+ m_total = numpy.zeros_like(z, dtype=dtype)
206
221
 
207
222
  if continuation != 'pade':
208
223
  # Stores m with the ravel size of z.
209
- m_partial = numpy.zeros((psi.size, z.size), dtype=numpy.complex128)
224
+ m_partial = numpy.zeros((psi.size, z.size), dtype=dtype)
210
225
 
211
226
  for k, psi_k in enumerate(psi):
212
227
  # Select quadrature size tailored to this P_k
@@ -221,7 +236,7 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40,
221
236
 
222
237
  # Evaluate Jacobi polynomials of the second kind, Q_k, using quadrature
223
238
  diff = t_nodes[:, None, None] - u[None, ...] # (n_quad, Ny, Nx)
224
- Q_k = (integrand[:, None, None] / diff).sum(axis=0)
239
+ Q_k = (integrand[:, None, None] / diff).sum(axis=0).astype(dtype)
225
240
 
226
241
  # Principal branch
227
242
  m_k = (2.0 / span) * Q_k
freealg/_linalg.py CHANGED
@@ -151,7 +151,7 @@ def eigvalsh(A, size=None, psd=None, seed=None, plot=False, **kwargs):
151
151
  # Perform fit and estimate eigenvalues
152
152
  order = 1 + int(len(samples)**0.2)
153
153
  ff.fit(method='chebyshev', K=order, projection='sample',
154
- continuation='wynn', force=True, plot=False, latex=False,
154
+ continuation='wynn-eps', force=True, plot=False, latex=False,
155
155
  save=False)
156
156
 
157
157
  if plot:
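The only functional change in this file switches the internal fit from the removed 'wynn' keyword to 'wynn-eps'. For orientation, a hedged sketch of calling the public eigvalsh wrapper; the test matrix and reliance on default arguments are my assumptions, only the signature in the hunk header and the internal Chebyshev/'wynn-eps' fit come from the diff:

    import numpy
    from freealg import eigvalsh

    rng = numpy.random.default_rng(0)
    X = rng.standard_normal((500, 500))
    A = (X + X.T) / numpy.sqrt(2 * 500)   # Wigner-like test matrix
    est = eigvalsh(A)                      # internally fits with continuation='wynn-eps'
    print(est.shape)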
freealg/_pade.py CHANGED
@@ -27,8 +27,8 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
27
27
  """
28
28
  Generate q real poles outside [lam_m, lam_p].
29
29
 
30
- even q : q/2 on each side (Chebyshev-like layout)
31
- odd q : (q+1)/2 on the *left*, (q1)/2 on the right
30
+ * even q : q/2 on each side (Chebyshev-like layout)
31
+ * odd q : (q+1)/2 on the *left*, (q-1)/2 on the right
32
32
  so q=1 => single pole on whichever side `odd_side` says.
33
33
 
34
34
  safety >= 1: 1, then poles start half an interval away; >1 pushes them
@@ -73,13 +73,13 @@ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
73
73
 
74
74
  def _encode_poles(a, lam_m, lam_p):
75
75
  """
76
- Map real pole a_j unconstrained s_j,
76
+ Map real pole a_j => unconstrained s_j,
77
77
  so that the default left-of-interval pole stays left.
78
78
  """
79
79
 
80
80
  # half-width of the interval
81
81
  d = 0.5 * (lam_p - lam_m)
82
- # if a < lam_m, we want s 0; if a > lam_p, s < 0
82
+ # if a < lam_m, we want s >= 0; if a > lam_p, s < 0
83
83
  return numpy.where(
84
84
  a < lam_m,
85
85
  numpy.log((lam_m - a) / d), # zero at a = lam_m - d
@@ -93,13 +93,13 @@ def _encode_poles(a, lam_m, lam_p):
93
93
 
94
94
  def _decode_poles(s, lam_m, lam_p):
95
95
  """
96
- Inverse map s_j real pole a_j outside the interval.
96
+ Inverse map s_j => real pole a_j outside the interval.
97
97
  """
98
98
 
99
99
  d = 0.5 * (lam_p - lam_m)
100
100
  return numpy.where(
101
101
  s >= 0,
102
- lam_m - d * numpy.exp(s), # maps s=0 to a=lam_md (left)
102
+ lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m-d (left)
103
103
  lam_p + d * numpy.exp(-s) # maps s=0 to a=lam_p+d (right)
104
104
  )
105
105
 
@@ -186,7 +186,7 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
186
186
  else:
187
187
  skip = 0 # all entries are residues
188
188
 
189
- # add λ only for the residue positions
189
+ # add lambda only for the residue positions
190
190
  n = ATA.shape[0]
191
191
  for i in range(skip, n):
192
192
  ATA[i, i] += pade_reg
@@ -343,7 +343,7 @@ def eval_pade(z, pade_sol):
343
343
  """
344
344
 
345
345
  # z_arr = numpy.asanyarray(z) # shape=(M,N)
346
- # flat = z_arr.ravel() # shape=(M·N,)
346
+ # flat = z_arr.ravel() # shape=(M*N,)
347
347
  # c, D = pade_sol['c'], pade_sol['D']
348
348
  # poles = pade_sol['poles']
349
349
  # resid = pade_sol['resid']
@@ -362,7 +362,7 @@ def eval_pade(z, pade_sol):
362
362
 
363
363
  out = c + D*z
364
364
  for bj, rj in zip(poles, resid):
365
- out += rj/(z - bj) # each is an (N,) op, no N×q temp
365
+ out += rj/(z - bj) # each is an (N,) op, no N*q temp
366
366
  return out
367
367
 
368
368
 
@@ -384,7 +384,7 @@ def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
384
384
  b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
385
385
 
386
386
  Approach:
387
- - Bruteforce all 2^q left/right assignments for denominator roots
387
+ - Brute-force all 2^q left/right assignments for denominator roots
388
388
  - Global search with differential_evolution, fallback to zeros if needed
389
389
  - Local refinement with least_squares
390
390
 
freealg/_sample.py CHANGED
@@ -15,7 +15,7 @@ from scipy.integrate import cumulative_trapezoid
15
15
  from scipy.interpolate import PchipInterpolator
16
16
  from scipy.stats import qmc
17
17
 
18
- __all__ = ['qmc_sample']
18
+ __all__ = ['sample']
19
19
 
20
20
 
21
21
  # =============
@@ -32,60 +32,75 @@ def _quantile_func(x, rho, clamp=1e-4, eps=1e-8):
32
32
  rho_clamp[rho < clamp] = eps
33
33
  cdf = cumulative_trapezoid(rho_clamp, x, initial=0)
34
34
  cdf /= cdf[-1]
35
+ cdf_inv = PchipInterpolator(cdf, x, extrapolate=False)
35
36
 
36
- return PchipInterpolator(cdf, x, extrapolate=False)
37
+ return cdf_inv
37
38
 
38
39
 
39
- # ==========
40
- # qmc sample
41
- # ==========
40
+ # ======
41
+ # sample
42
+ # ======
42
43
 
43
- def qmc_sample(x, rho, num_pts, seed=None):
44
+ def sample(x, rho, num_pts, method='qmc', seed=None):
44
45
  """
45
- Low-discrepancy sampling from a univariate density estimate using
46
- Quasi-Monte Carlo.
46
+ Low-discrepancy sampling from a density estimate.
47
47
 
48
48
  Parameters
49
49
  ----------
50
50
 
51
- x : numpy.array, shape (n,)
52
- Sorted abscissae at which the density has been evaluated.
51
+ x : numpy.array
52
+ Sorted abscissae at which the density has been evaluated. Shape `(n,)`.
53
53
 
54
- rho : numpy.array, shape (n,)
54
+ rho : numpy.array
55
55
  Density values corresponding to `x`. Must be non-negative and define
56
56
  a valid probability density (i.e., integrate to 1 over the support).
57
+ Shape `(n,)`.
57
58
 
58
59
  num_pts : int
59
60
  Number of sample points to generate from the density estimate.
60
61
 
62
+ method : {``'mc'``, ``'qmc'``}, default= ``'qmc'``
63
+ Method of drawing samples from the uniform distribution:
64
+
65
+ * ``'mc'``: Monte Carlo
66
+ * ``'qmc'``: Quasi Monte Carlo
67
+
61
68
  seed : int, default=None
62
69
  Seed for random number generator
63
70
 
64
71
  Returns
65
72
  -------
73
+
66
74
  samples : numpy.array, shape (num_pts,)
67
75
  Samples drawn from the estimated density using a one-dimensional Halton
68
76
  sequence mapped through the estimated quantile function.
69
77
 
70
78
  See Also
71
79
  --------
72
- scipy.stats.qmc.Halton
73
- Underlying Quasi-Monte Carlo engine used for generating low-discrepancy
74
- points.
80
+
81
+ freealg.supp
82
+ freealg.kde
83
+
84
+ Notes
85
+ -----
86
+
87
+ The underlying Quasi-Monte Carlo engine uses ``scipy.stats.qmc.Halton``
88
+ to generate low-discrepancy points.
75
89
 
76
90
  Examples
77
91
  --------
78
92
 
79
93
  .. code-block:: python
94
+ :emphasize-lines: 8
80
95
 
81
96
  >>> import numpy
82
- >>> from freealg import qmc_sample
97
+ >>> from freealg import sample
83
98
 
84
99
  >>> # density of Beta(3,1) on [0,1]
85
100
  >>> x = numpy.linspace(0, 1, 200)
86
101
  >>> rho = 3 * x**2
87
102
 
88
- >>> samples = qmc_sample(x, rho, num_pts=1000)
103
+ >>> samples = sample(x, rho, num_pts=1000, method='qmc')
89
104
  >>> assert samples.shape == (1000,)
90
105
 
91
106
  >>> # Empirical mean should be close to 3/4
@@ -94,8 +109,17 @@ def qmc_sample(x, rho, num_pts, seed=None):
94
109
 
95
110
  rng = numpy.random.default_rng(seed)
96
111
  quantile = _quantile_func(x, rho)
97
- engine = qmc.Halton(d=1, rng=rng)
98
- u = engine.random(num_pts)
112
+
113
+ # Draw from uniform distribution
114
+ if method == 'mc':
115
+ u = rng.random(num_pts)
116
+ elif method == 'qmc':
117
+ engine = qmc.Halton(d=1, rng=rng)
118
+ u = engine.random(num_pts)
119
+ else:
120
+ raise NotImplementedError('"method" is invalid.')
121
+
122
+ # Draw from distribution by mapping from inverse CDF
99
123
  samples = quantile(u)
100
124
 
101
125
  return samples.ravel()
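A short sketch (mine) contrasting the two draw modes of the renamed sample function, reusing the Beta(3,1) toy density from the docstring above:

    import numpy
    from freealg import sample

    x = numpy.linspace(0, 1, 200)
    rho = 3 * x**2                                              # Beta(3, 1) density

    mc = sample(x, rho, num_pts=1000, method='mc', seed=0)      # pseudo-random uniforms
    qmc = sample(x, rho, num_pts=1000, method='qmc', seed=0)    # Halton low-discrepancy points
    print(mc.mean(), qmc.mean())                                # both should be near 3/4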
freealg/_series.py CHANGED
@@ -183,13 +183,13 @@ def wynn_rho(Sn, beta=0.0):
183
183
  -------
184
184
 
185
185
  S : numpy.ndarray
186
- A 1D array of shape ``(d,)`` giving the rhoaccelerated estimate
186
+ A 1D array of shape ``(d,)`` giving the rho-accelerated estimate
187
187
  of the series limit for each component.
188
188
 
189
189
  Notes
190
190
  -----
191
191
 
192
- Let ``S_n`` be the *n*‑th partial sum of the (possibly divergent)
192
+ Let ``S_n`` be the *n*-th partial sum of the (possibly divergent)
193
193
  sequence. Wynn's rho algorithm builds a triangular table
194
194
  ``rho[k, n]`` (row *k*, column *n*) as follows:
195
195
 
@@ -200,7 +200,7 @@ def wynn_rho(Sn, beta=0.0):
200
200
  (n + beta + k - 1) / (rho[k-1, n+1] - rho[k-1, n])
201
201
 
202
202
  Only even rows (k even) provide improved approximants. As with
203
- ``wynn_epsilon``, we apply the scalar recursion componentwise so that a
203
+ ``wynn_epsilon``, we apply the scalar recursion component-wise so that a
204
204
  slowly converging component does not stall the others.
205
205
  """
206
206
 
@@ -255,7 +255,7 @@ def wynn_rho(Sn, beta=0.0):
255
255
 
256
256
  def levin_u(Sn, omega=None, beta=0.0):
257
257
  """
258
- Levin utransform (vector form).
258
+ Levin u-transform (vector form).
259
259
 
260
260
  Parameters
261
261
  ----------
@@ -339,13 +339,13 @@ def weniger_delta(Sn):
339
339
  -------
340
340
 
341
341
  S : numpy.ndarray
342
- Array of shape (d,) giving the Δ²‑accelerated limit estimate for each
343
- component.
342
+ Array of shape (d,) giving the delta2 accelerated limit estimate for
343
+ each component.
344
344
  """
345
345
 
346
346
  N, d = Sn.shape
347
347
 
348
- # Need at least three partial sums to form Δ²
348
+ # Need at least three partial sums to form delta2
349
349
  if N < 3:
350
350
  return Sn[-1, :].copy()
351
351
 
@@ -384,14 +384,14 @@ def brezinski_theta(Sn):
384
384
  ----------
385
385
 
386
386
  Sn : numpy.ndarray
387
- A 2D array of the size ``(N, d)``, where `N` is the number of partial
387
+ A 2-D array of the size ``(N, d)``, where `N` is the number of partial
388
388
  sums and `d` is the vector size.
389
389
 
390
390
  Returns
391
391
  -------
392
392
 
393
393
  S : numpy.ndarray
394
- A 1D array of the size ``(d,)`` the thetaaccelerated estimate of
394
+ A 1-D array of the size ``(d,)``. The theta-accelerated estimate of
395
395
  the series limit in each vector component.
396
396
  """
397
397
 
freealg/_support.py CHANGED
@@ -14,7 +14,7 @@ import numpy
14
14
  import numba
15
15
  from scipy.stats import gaussian_kde
16
16
 
17
- __all__ = ['support_from_density', 'detect_support']
17
+ __all__ = ['support_from_density', 'supp']
18
18
 
19
19
 
20
20
  # ====================
@@ -34,26 +34,26 @@ def support_from_density(dx, density):
34
34
  n = density.shape[0]
35
35
  target = 1.0 / dx
36
36
 
37
- # 1) compute total_sum once
37
+ # compute total_sum once
38
38
  total_sum = 0.0
39
39
  for t in range(n):
40
40
  total_sum += density[t]
41
41
 
42
- # 2) set up our bestsofar trackers
42
+ # set up our "best-so-far" trackers
43
43
  large = 1e300
44
44
  best_nonneg_sum = large
45
45
  best_nonneg_idx = -1
46
46
  best_nonpos_sum = -large
47
47
  best_nonpos_idx = -1
48
48
 
49
- # 3) seed with first element (i.e. prefix_sum for k=1)
49
+ # seed with first element (i.e. prefix_sum for k=1)
50
50
  prefix_sum = density[0]
51
51
  if prefix_sum >= 0.0:
52
52
  best_nonneg_sum, best_nonneg_idx = prefix_sum, 1
53
53
  else:
54
54
  best_nonpos_sum, best_nonpos_idx = prefix_sum, 1
55
55
 
56
- # 4) sweep j from 2...n1, updating prefix_sum on the fly
56
+ # sweep j from 2, ..., n-1, updating prefix_sum on the fly
57
57
  optimal_i, optimal_j = 1, 2
58
58
  minimal_cost = large
59
59
 
@@ -88,7 +88,7 @@ def support_from_density(dx, density):
88
88
  minimal_cost = total_cost
89
89
  optimal_i, optimal_j = i_cand, j
90
90
 
91
- # update our prefixsum trackers
91
+ # update our prefix-sum trackers
92
92
  if prefix_sum >= 0.0:
93
93
  if prefix_sum < best_nonneg_sum:
94
94
  best_nonneg_sum, best_nonneg_idx = prefix_sum, j
@@ -99,36 +99,34 @@ def support_from_density(dx, density):
99
99
  return optimal_i, optimal_j
100
100
 
101
101
 
102
- # ==============
103
- # detect support
104
- # ==============
102
+ # ====
103
+ # supp
104
+ # ====
105
105
 
106
- def detect_support(eigs, method='asymp', k=None, p=0.001, **kwargs):
106
+ def supp(eigs, method='asymp', k=None, p=0.001):
107
107
  """
108
108
  Estimates the support of the eigenvalue density.
109
109
 
110
110
  Parameters
111
111
  ----------
112
112
 
113
- method : {``'range'``, ``'asymp'``, ``'jackknife'``, ``'regression'``,
114
- ``'interior'``, ``'interior_smooth'``}, \
115
- default= ``'asymp'``
113
+ method : {``'range'``, ``'asymp'``, ``'jackknife'``, ``'regression'``, \
114
+ ``'interior'``, ``'interior_smooth'``}, default= ``'asymp'``
116
115
  The method of support estimation:
117
116
 
118
117
  * ``'range'``: no estimation; the support is the range of the
119
- eigenvalues.
118
+ eigenvalues.
120
119
  * ``'asymp'``: assume the relative error in the min/max estimator is
121
- 1/n.
122
- * ``'jackknife'``: estimates the support using Quenouille's [1]
123
- jackknife estimator. Fast and simple, more accurate than the
124
- range.
120
+ :math:`1/n`.
121
+ * ``'jackknife'``: estimates the support using Quenouille's [1]_
122
+ jackknife estimator. Fast and simple, more accurate than the range.
125
123
  * ``'regression'``: estimates the support by performing a regression
126
- under the assumption that the edge behavior is of square-root
127
- type. Often most accurate.
124
+ under the assumption that the edge behavior is of square-root type.
125
+ Often most accurate.
128
126
  * ``'interior'``: estimates a support assuming the range overestimates;
129
- uses quantiles (p, 1-p).
127
+ uses quantiles :math:`(p, 1-p)`.
130
128
  * ``'interior_smooth'``: same as ``'interior'`` but using kernel
131
- density estimation, from [2]_.
129
+ density estimation, from [2]_.
132
130
 
133
131
  k : int, default = None
134
132
  Number of extreme order statistics to use for ``method='regression'``.
@@ -140,6 +138,21 @@ def detect_support(eigs, method='asymp', k=None, p=0.001, **kwargs):
140
138
  This value should be between 0 and 1, ideally a small number close to
141
139
  zero.
142
140
 
141
+ Returns
142
+ -------
143
+
144
+ lam_m : float
145
+ Lower end of support interval :math:`[\\lambda_{-}, \\lambda_{+}]`.
146
+
147
+ lam_p : float
148
+ Upper end of support interval :math:`[\\lambda_{-}, \\lambda_{+}]`.
149
+
150
+ See Also
151
+ --------
152
+
153
+ freealg.sample
154
+ freealg.kde
155
+
143
156
  References
144
157
  ----------
145
158
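A short illustration (mine, not from the package) of the renamed supp helper on a toy semicircle spectrum whose true support is [-2, 2]; the subset of methods compared is an arbitrary choice:

    import numpy
    from freealg import supp

    rng = numpy.random.default_rng(0)
    eig = 4 * rng.beta(1.5, 1.5, size=5000) - 2   # semicircle samples on [-2, 2]

    for method in ('range', 'asymp', 'jackknife'):
        lam_m, lam_p = supp(eig, method=method)
        print(method, lam_m, lam_p)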
 
freealg/_util.py CHANGED
@@ -13,14 +13,60 @@
13
13
 
14
14
  import numpy
15
15
  import scipy
16
+ from scipy.stats import gaussian_kde
16
17
  from scipy.stats import beta
18
+ # from statsmodels.nonparametric.kde import KDEUnivariate
17
19
  from scipy.optimize import minimize
20
+ import matplotlib.pyplot as plt
21
+ import texplot
22
+ from ._plot_util import _auto_bins
18
23
 
19
24
  # Fallback to previous API
20
25
  if not hasattr(numpy, 'trapezoid'):
21
26
  numpy.trapezoid = numpy.trapz
22
27
 
23
- __all__ = ['compute_eig', 'beta_kde', 'force_density']
28
+ __all__ = ['resolve_complex_dtype', 'compute_eig', 'kde', 'force_density']
29
+
30
+
31
+ # =====================
32
+ # resolve complex dtype
33
+ # =====================
34
+
35
+ def resolve_complex_dtype(dtype):
36
+ """
37
+ Convert a user-supplied dtype name to a NumPy dtype object and fall back
38
+ safely if the requested precision is unavailable.
39
+ """
40
+
41
+ # Normalise the string
42
+ dtype = str(dtype).lower()
43
+
44
+ if not isinstance(numpy.dtype(dtype), numpy.dtype):
45
+ raise ValueError(f'{dtype} is not a recognized numpy dtype.')
46
+ elif not numpy.issubdtype(numpy.dtype(dtype), numpy.complexfloating):
47
+ raise ValueError(f'{dtype} is not a complex dtype.')
48
+
49
+ if dtype in {'complex128', '128'}:
50
+ cdtype = numpy.complex128
51
+
52
+ elif dtype in ['complex256', '256', 'longcomplex', 'clongcomplex']:
53
+
54
+ complex256_found = False
55
+ for name in ['complex256', 'clongcomplex']:
56
+ if hasattr(numpy, name):
57
+ cdtype = getattr(numpy, name)
58
+ complex256_found = True
59
+
60
+ if not complex256_found:
61
+ raise RuntimeWarning(
62
+ 'NumPy on this platform has no 256-bit complex type. ' +
63
+ 'Falling back to complex128.')
64
+ cdtype = numpy.complex128
65
+
66
+ else:
67
+ raise ValueError('Unsupported dtype.')
68
+
69
+ return cdtype
24
70
 
25
71
 
26
72
  # ===========
@@ -37,50 +83,107 @@ def compute_eig(A, lower=False):
37
83
  return eig
38
84
 
39
85
 
40
- # ========
41
- # beta kde
42
- # ========
86
+ # ===
87
+ # kde
88
+ # ===
43
89
 
44
- def beta_kde(eig, xs, lam_m, lam_p, h):
90
+ def kde(eig, xs, lam_m, lam_p, h, kernel='beta', plot=False):
45
91
  """
46
- Beta-kernel KDE with automatic guards against NaNs.
92
+ Kernel density estimation of eigenvalues.
47
93
 
48
94
  Parameters
49
95
  ----------
50
- eig : (n,) 1-D array of samples
51
- xs : evaluation grid (must lie within [lam_m, lam_p])
52
- lam_m, lam_p : float, support endpoints (lam_m < lam_p)
53
- h : bandwidth in rescaled units (0 < h < 1)
54
96
 
55
- Returns
56
- -------
57
- pdf : ndarray same length as xs
58
- """
97
+ eig : numpy.array
98
+ 1D array of samples of size `n`.
99
+
100
+ xs : numpy.array
101
+ 1D array of evaluation grid (must lie within ``[lam_m, lam_p]``)
102
+
103
+ lam_m : float
104
+ Lower end of the support endpoints with ``lam_m < lam_p``.
59
105
 
60
- span = lam_p - lam_m
61
- if span <= 0:
62
- raise ValueError("lam_p must be larger than lam_m")
106
+ lam_p : float
107
+ Upper end of the support endpoints with ``lam_m < lam_p``.
63
108
 
64
- # map samples and grid to [0, 1]
65
- u = (eig - lam_m) / span
66
- t = (xs - lam_m) / span
109
+ h : float
110
+ Kernel bandwidth in rescaled units where ``0 < h < 1``.
67
111
 
68
- if u.min() < 0 or u.max() > 1:
69
- mask = (u > 0) & (u < 1)
70
- u = u[mask]
112
+ kernel : {``'gaussian'``, ``'beta'``}, default= ``'beta'``
113
+ Kernel function using either Gaussian or Beta distribution.
71
114
 
72
- pdf = numpy.zeros_like(xs, dtype=float)
73
- n = len(u)
115
+ plot : bool, default=False
116
+ If `True`, the KDE is plotted.
74
117
 
75
- # tiny positive number to keep shape parameters >0
76
- eps = 1e-6
77
- for ui in u:
78
- a = max(ui / h + 1.0, eps)
79
- b = max((1.0 - ui) / h + 1.0, eps)
80
- pdf += beta.pdf(t, a, b)
118
+ Returns
119
+ -------
120
+
121
+ pdf : numpy.ndarray
122
+ Probability density function with the same length as ``xs``.
123
+
124
+ See Also
125
+ --------
126
+
127
+ freealg.supp
128
+ freealg.sample
129
+ """
81
130
 
82
- pdf /= n * span # renormalise
83
- pdf[(t < 0) | (t > 1)] = 0.0 # exact zeros outside
131
+ if kernel == 'gaussian':
132
+ pdf = gaussian_kde(eig, bw_method=h)(xs)
133
+
134
+ # Adaptive KDE
135
+ # k = KDEUnivariate(eig)
136
+ # k.fit(kernel='gau', bw='silverman', fft=False, weights=None,
137
+ # gridsize=1024, adaptive=True)
138
+ # pdf = k.evaluate(xs)
139
+
140
+ elif kernel == 'beta':
141
+
142
+ span = lam_p - lam_m
143
+ if span <= 0:
144
+ raise ValueError("lam_p must be larger than lam_m")
145
+
146
+ # map samples and grid to [0, 1]
147
+ u = (eig - lam_m) / span
148
+ t = (xs - lam_m) / span
149
+
150
+ if u.min() < 0 or u.max() > 1:
151
+ mask = (u > 0) & (u < 1)
152
+ u = u[mask]
153
+
154
+ pdf = numpy.zeros_like(xs, dtype=float)
155
+ n = len(u)
156
+
157
+ # tiny positive number to keep shape parameters > 0
158
+ eps = 1e-6
159
+ for ui in u:
160
+ a = max(ui / h + 1.0, eps)
161
+ b = max((1.0 - ui) / h + 1.0, eps)
162
+ pdf += beta.pdf(t, a, b)
163
+
164
+ pdf /= n * span # renormalise
165
+ pdf[(t < 0) | (t > 1)] = 0.0 # exact zeros outside
166
+
167
+ else:
168
+ raise NotImplementedError('"kernel" is invalid.')
169
+
170
+ if plot:
171
+ with texplot.theme(use_latex=False):
172
+ fig, ax = plt.subplots(figsize=(6, 4))
173
+
174
+ x_min = numpy.min(xs)
175
+ x_max = numpy.max(xs)
176
+ bins = numpy.linspace(x_min, x_max, _auto_bins(eig))
177
+ _ = ax.hist(eig, bins, density=True, color='silver',
178
+ edgecolor='none', label='Samples histogram')
179
+ ax.plot(xs, pdf, color='black', label='KDE')
180
+ ax.set_xlabel(r'$x$')
181
+ ax.set_ylabel(r'$\\rho(x)$')
182
+ ax.set_xlim([xs[0], xs[-1]])
183
+ ax.set_ylim(bottom=0)
184
+ ax.set_title('Kernel Density Estimation')
185
+ ax.legend(fontsize='x-small')
186
+ plt.show()
84
187
 
85
188
  return pdf
86
189
 
@@ -95,8 +198,8 @@ def force_density(psi0, support, density, grid, alpha=0.0, beta=0.0):
95
198
  min 0.5 ||psi - psi0||^2
96
199
  s.t. F_pos psi >= 0 (positivity on grid)
97
200
  psi[0] = psi0[0] (mass)
98
- f(lam_m)·psi = 0 (zero at left edge)
99
- f(lam_p)·psi = 0 (zero at right edge)
201
+ f(lam_m) psi = 0 (zero at left edge)
202
+ f(lam_p) psi = 0 (zero at right edge)
100
203
  """
101
204
 
102
205
  lam_m, lam_p = support
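The old beta_kde helper is folded into a single kde function with a kernel switch. A hedged sketch comparing the two kernels on a toy semicircle spectrum (grid, bandwidth and data are my choices):

    import numpy
    from freealg import kde

    rng = numpy.random.default_rng(0)
    eig = 4 * rng.beta(1.5, 1.5, size=3000) - 2   # toy spectrum on [-2, 2]
    xs = numpy.linspace(-2, 2, 500)

    pdf_beta = kde(eig, xs, lam_m=-2.0, lam_p=2.0, h=0.05, kernel='beta')
    pdf_gauss = kde(eig, xs, lam_m=-2.0, lam_p=2.0, h=0.05, kernel='gaussian')
    print(numpy.trapezoid(pdf_beta, xs))          # should be close to 1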
freealg/distributions/_kesten_mckay.py CHANGED
@@ -78,7 +78,7 @@ class KestenMcKay(object):
78
78
  ----------
79
79
 
80
80
  .. [1] Kesten, H. (1959). Symmetric random walks on groups. Transactions of
81
- the American Mathematical Society, 92(2), 336354.
81
+ the American Mathematical Society, 92(2), 336-354.
82
82
 
83
83
  .. [2] McKay, B. D. (1981). The expected eigenvalue distribution of a large
84
84
  regular graph. Linear Algebra and its Applications, 40, 203-216
freealg/distributions/_marchenko_pastur.py CHANGED
@@ -290,7 +290,7 @@ class MarchenkoPastur(object):
290
290
  m1 = (-B + sqrtD) / (2 * A)
291
291
  m2 = (-B - sqrtD) / (2 * A)
292
292
 
293
- # pick correct branch only for nonmasked entries
293
+ # pick correct branch only for non-masked entries
294
294
  upper = z[not_mask].imag >= 0
295
295
  branch = numpy.empty_like(m1)
296
296
  branch[upper] = numpy.where(sign*m1[upper].imag > 0, m1[upper],
freealg/distributions/_meixner.py CHANGED
@@ -83,7 +83,7 @@ class Meixner(object):
83
83
 
84
84
  .. [1] Saitoh, N. & Yosnida, M. (2001). The infinite divisibility and
85
85
  orthogonal polynomials with a constant recursion formula in free
86
- probability theory. Probab. Math. Statist., 21, 159170.
86
+ probability theory. Probab. Math. Statist., 21, 159-170.
87
87
 
88
88
  Examples
89
89
  --------
@@ -315,7 +315,7 @@ class Meixner(object):
315
315
  m1 = (-B + sqrtD) / (2 * A)
316
316
  m2 = (-B - sqrtD) / (2 * A)
317
317
 
318
- # pick correct branch only for nonmasked entries
318
+ # pick correct branch only for non-masked entries
319
319
  upper = z.imag >= 0
320
320
  branch = numpy.empty_like(m1)
321
321
  branch[upper] = numpy.where(
freealg/distributions/_wachter.py CHANGED
@@ -290,7 +290,7 @@ class Wachter(object):
290
290
  m1 = (-B + sqrtD) / (2 * A)
291
291
  m2 = (-B - sqrtD) / (2 * A)
292
292
 
293
- # pick correct branch only for nonmasked entries
293
+ # pick correct branch only for non-masked entries
294
294
  upper = z.imag >= 0
295
295
  branch = numpy.empty_like(m1)
296
296
  branch[upper] = numpy.where(sign*m1[upper].imag > 0, m1[upper],
freealg/freeform.py CHANGED
@@ -12,10 +12,8 @@
12
12
  # =======
13
13
 
14
14
  import numpy
15
- from scipy.stats import gaussian_kde
16
- # from statsmodels.nonparametric.kde import KDEUnivariate
17
15
  from functools import partial
18
- from ._util import compute_eig, beta_kde, force_density
16
+ from ._util import resolve_complex_dtype, compute_eig, kde, force_density
19
17
  from ._jacobi import jacobi_sample_proj, jacobi_kernel_proj, jacobi_density, \
20
18
  jacobi_stieltjes
21
19
  from ._chebyshev import chebyshev_sample_proj, chebyshev_kernel_proj, \
@@ -25,8 +23,8 @@ from ._damp import jackson_damping, lanczos_damping, fejer_damping, \
25
23
  from ._plot_util import plot_fit, plot_density, plot_hilbert, plot_stieltjes
26
24
  from ._pade import fit_pade, eval_pade
27
25
  from ._decompress import decompress
28
- from ._sample import qmc_sample
29
- from ._support import detect_support
26
+ from ._sample import sample
27
+ from ._support import supp
30
28
 
31
29
  # Fallback to previous numpy API
32
30
  if not hasattr(numpy, 'trapezoid'):
@@ -59,8 +57,21 @@ class FreeForm(object):
59
57
  Size of perturbations into the upper half plane for Plemelj's
60
58
  formula.
61
59
 
60
+ dtype : {``'complex128'``, ``'complex256'``}, default = ``'complex128'``
61
+ Data type for inner computations of complex variables:
62
+
63
+ * ``'complex128'``: 128-bit complex numbers, equivalent of two double
64
+ precision floating point.
65
+ * ``'complex256'``: 256-bit complex numbers, equivalent of two long
66
+ double precision floating point. This option is only available on
67
+ Linux machines.
68
+
69
+ When using series acceleration methods (such as setting
70
+ ``continuation`` in the :func:`fit` function to ``wynn-eps``), using a
71
+ higher-precision floating-point type might improve convergence.
72
+
62
73
  **kwargs : dict, optional
63
- Parameters for the ``detect_support`` function can also be prescribed
74
+ Parameters for the :func:`supp` function can also be prescribed
64
75
  here when ``support=None``.
65
76
 
66
77
  Notes
@@ -135,7 +146,8 @@ class FreeForm(object):
135
146
  # init
136
147
  # ====
137
148
 
138
- def __init__(self, A, support=None, delta=1e-6, **kwargs):
149
+ def __init__(self, A, support=None, delta=1e-6, dtype='complex128',
150
+ **kwargs):
139
151
  """
140
152
  Initialization.
141
153
  """
@@ -144,6 +156,9 @@ class FreeForm(object):
144
156
  self.eig = None
145
157
  self.delta = delta # Offset above real axis to apply Plemelj formula
146
158
 
159
+ # Data type for complex arrays
160
+ self.dtype = resolve_complex_dtype(dtype)
161
+
147
162
  # Eigenvalues
148
163
  if A.ndim == 1:
149
164
  # When A is a 1D array, it is assumed A is the eigenvalue array.
@@ -160,7 +175,8 @@ class FreeForm(object):
160
175
 
161
176
  # Support
162
177
  if support is None:
163
- self.lam_m, self.lam_p = detect_support(self.eig, **kwargs)
178
+ # Detect support
179
+ self.lam_m, self.lam_p = supp(self.eig, **kwargs)
164
180
  else:
165
181
  self.lam_m = support[0]
166
182
  self.lam_p = support[1]
@@ -234,7 +250,7 @@ class FreeForm(object):
234
250
  If `True`, it forces the density to have unit mass and to be
235
251
  strictly positive.
236
252
 
237
- continuation : {``'pade'``, ``'wynn-eps'``, ``'wynn-rho'``,
253
+ continuation : {``'pade'``, ``'wynn-eps'``, ``'wynn-rho'``, \
238
254
  ``'levin'``, ``'weniger'``, ``'brezinski'``}, default= ``'pade'``
239
255
  Method of analytic continuation to construct the second branch of
240
256
  Stieltjes transform in the lower-half complex plane:
@@ -242,10 +258,12 @@ class FreeForm(object):
242
258
  * ``'pade'``: using Riemann-Hilbert problem with Pade
243
259
  approximation.
244
260
  * ``'wynn-eps'``: Wynn's :math:`\\epsilon` algorithm.
245
- * ``'wynn-rho'``: Wynn's :math:`\\rho` algorithm.
246
- * ``'levin'``: Levin's :math:`u` transform.
247
- * ``'weniger'``: Weniger's :math:`\\delta^2` algorithm.
248
- * ``'brezinski'``: Brezinski's :math:`\\theta` algorithm.
261
+ * ``'wynn-rho'``: Wynn's :math:`\\rho` algorithm (`experimental`).
262
+ * ``'levin'``: Levin's :math:`u` transform (`experimental`).
263
+ * ``'weniger'``: Weniger's :math:`\\delta^2` algorithm
264
+ (`experimental`).
265
+ * ``'brezinski'``: Brezinski's :math:`\\theta` algorithm
266
+ (`experimental`).
249
267
 
250
268
  pade_p : int, default=0
251
269
  Degree of polynomial :math:`P(z)` is :math:`q+p` where :math:`p`
@@ -336,53 +354,35 @@ class FreeForm(object):
336
354
  if projection == 'sample':
337
355
  psi = jacobi_sample_proj(self.eig, support=self.support, K=K,
338
356
  alpha=alpha, beta=beta, reg=reg)
339
- else:
357
+ elif projection in ['gaussian', 'beta']:
340
358
  # smooth KDE on a fixed grid
341
359
  xs = numpy.linspace(self.lam_m, self.lam_p, 2000)
342
360
 
343
- if projection == 'gaussian':
344
- pdf = gaussian_kde(self.eig, bw_method=kernel_bw)(xs)
345
- else:
346
- pdf = beta_kde(self.eig, xs, self.lam_m, self.lam_p,
347
- kernel_bw)
348
-
349
- # Adaptive KDE
350
- # k = KDEUnivariate(self.eig)
351
- # k.fit(bw="silverman", fft=False, weights=None, gridsize=1024,
352
- # adaptive=True)
353
- # pdf = k.evaluate(xs)
354
-
355
- # import matplotlib.pyplot as plt
356
- # plt.plot(xs, pdf)
357
- # plt.grid(True)
358
- # plt.show()
361
+ pdf = kde(self.eig, xs, self.lam_m, self.lam_p, kernel_bw,
362
+ kernel=projection)
359
363
 
360
364
  psi = jacobi_kernel_proj(xs, pdf, support=self.support, K=K,
361
365
  alpha=alpha, beta=beta, reg=reg)
366
+ else:
367
+ raise NotImplementedError('"projection" is invalid.')
362
368
 
363
369
  elif method == 'chebyshev':
364
370
 
365
371
  if projection == 'sample':
366
372
  psi = chebyshev_sample_proj(self.eig, support=self.support,
367
373
  K=K, reg=reg)
368
- else:
374
+ elif projection in ['gaussian', 'beta']:
369
375
  # smooth KDE on a fixed grid
370
376
  xs = numpy.linspace(self.lam_m, self.lam_p, 2000)
371
377
 
372
- if projection == 'gaussian':
373
- pdf = gaussian_kde(self.eig, bw_method=kernel_bw)(xs)
374
- else:
375
- pdf = beta_kde(self.eig, xs, self.lam_m, self.lam_p,
376
- kernel_bw)
377
-
378
- # Adaptive KDE
379
- # k = KDEUnivariate(self.eig)
380
- # k.fit(bw="silverman", fft=False, weights=None, gridsize=1024,
381
- # adaptive=True)
382
- # pdf = k.evaluate(xs)
378
+ pdf = kde(self.eig, xs, self.lam_m, self.lam_p, kernel_bw,
379
+ kernel=projection)
383
380
 
384
381
  psi = chebyshev_kernel_proj(xs, pdf, support=self.support,
385
382
  K=K, reg=reg)
383
+ else:
384
+ raise NotImplementedError('"projection" is invalid.')
385
+
386
386
  else:
387
387
  raise NotImplementedError('"method" is invalid.')
388
388
 
@@ -639,8 +639,8 @@ class FreeForm(object):
639
639
  diff = x[:, None] - x_s[None, :]
640
640
  D = rho_s[None, :] / diff
641
641
 
642
- # Principalvalue: wherever t == x_i, then diff == 0, zero that entry
643
- # (numpy.isclose handles floatingpoint exactly)
642
+ # Principal-value: wherever t == x_i, then diff == 0, zero that entry
643
+ # (numpy.isclose handles floating-point exactly)
644
644
  D[numpy.isclose(diff, 0.0)] = 0.0
645
645
 
646
646
  # Integrate each row over t using trapezoid rule on x_s
@@ -825,13 +825,15 @@ class FreeForm(object):
825
825
  if self.method == 'jacobi':
826
826
  stieltjes = partial(jacobi_stieltjes, psi=self.psi,
827
827
  support=self.support, alpha=self.alpha,
828
- beta=self.beta, continuation=self.continuation)
828
+ beta=self.beta, continuation=self.continuation,
829
+ dtype=self.dtype)
829
830
  # n_base = n_base
830
831
 
831
832
  elif self.method == 'chebyshev':
832
833
  stieltjes = partial(chebyshev_stieltjes, psi=self.psi,
833
834
  support=self.support,
834
- continuation=self.continuation)
835
+ continuation=self.continuation,
836
+ dtype=self.dtype)
835
837
 
836
838
  mask_p = z.imag >= 0.0
837
839
  mask_m = z.imag < 0.0
@@ -1002,7 +1004,7 @@ class FreeForm(object):
1002
1004
  # Initial guess for roots (only for the first iteration)
1003
1005
  # if i == 0:
1004
1006
  # roots = numpy.full(x.shape, numpy.mean(self.support) - 0.1j,
1005
- # dtype=numpy.complex128)
1007
+ # dtype=self.dtype)
1006
1008
  roots = None
1007
1009
 
1008
1010
  rho[i, :], roots = decompress(
@@ -1081,7 +1083,7 @@ class FreeForm(object):
1081
1083
  size = self.n
1082
1084
 
1083
1085
  rho, x = self.decompress(size, **kwargs)
1084
- eigs = numpy.sort(qmc_sample(x, rho, size, seed=seed))
1086
+ eigs = numpy.sort(sample(x, rho, size, method='qmc', seed=seed))
1085
1087
 
1086
1088
  return eigs
1087
1089
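The new dtype option threads through fit and decompress via the partial calls above. A hedged end-to-end sketch (the matrix, sizes and K are my choices; the fit arguments mirror the call used inside eigvalsh; per the docstring, 'complex256' requires a platform where NumPy exposes it, typically Linux):

    import numpy
    from freealg import FreeForm

    rng = numpy.random.default_rng(0)
    X = rng.standard_normal((1000, 1000))
    A = (X + X.T) / numpy.sqrt(2 * 1000)   # Wigner-like matrix

    ff = FreeForm(A, dtype='complex256')   # extended-precision Stieltjes evaluations
    ff.fit(method='chebyshev', K=10, projection='sample',
           continuation='wynn-eps', force=True, plot=False, latex=False,
           save=False)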
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: freealg
3
- Version: 0.5.3
3
+ Version: 0.6.0
4
4
  Summary: Free probability for large matrices
5
5
  Home-page: https://github.com/ameli/freealg
6
6
  Download-URL: https://github.com/ameli/freealg/archive/main.zip
@@ -0,0 +1,26 @@
1
+ freealg/__init__.py,sha256=muuYCvlsXjuX1W67YGFca9nFxprFsALLyB3CrJpXFnY,728
2
+ freealg/__version__.py,sha256=cID1jLnC_vj48GgMN6Yb1FA3JsQ95zNmCHmRYE8TFhY,22
3
+ freealg/_chebyshev.py,sha256=1RqSKHgm6WPWbflBk4O0xaQbgven5MiucUnjuAMyUcs,6912
4
+ freealg/_damp.py,sha256=k2vtBtWOxQBf4qXaWu_En81lQBXbEO4QbxxWpvuVhdE,1802
5
+ freealg/_decompress.py,sha256=bFhQx--uptWJ7OjVwEs_tWYT6mLijBKJ9EbrD24Sbl0,32199
6
+ freealg/_jacobi.py,sha256=6oFY6aZpuosozP2PNEWd3zcWJIGHfeEF5ElFgtxT6z0,8064
7
+ freealg/_linalg.py,sha256=0BzJNTXiY1VH3OKrCFgbE0QHLgRoKyiILsBWtnygFGc,13141
8
+ freealg/_pade.py,sha256=BthDHScn2lILTTU2hlGNP-8YqddU3Uyxe0n0FkprwDs,13645
9
+ freealg/_plot_util.py,sha256=GKvmc1wjVGeqoomrULPbzBEt6P86FdoR2idBLYh5EDY,20068
10
+ freealg/_sample.py,sha256=yLJSGlq27j8tA-kDntRwfHIUU8Oo2IOmOTxS8yTRGRU,3075
11
+ freealg/_series.py,sha256=33LLCUe4svmV0eWyzhP_XClfDzccQHTW9WBJlYlLfHY,11475
12
+ freealg/_support.py,sha256=nxDa2OFlWBgjD0_1qoSMWG7kub6-GIuxIA04n5bdaYw,6614
13
+ freealg/_util.py,sha256=NaEhcOxbue44l_xAhefnNZYTy3pBBGBFyk9HdaRjQKo,6899
14
+ freealg/freeform.py,sha256=Grp1sdEy2QPagtkZunBixxykY8nTMdbV68t3B8WU-bQ,42679
15
+ freealg/distributions/__init__.py,sha256=t_yZyEkW_W_tSV9IvgYXtVASxD2BEdiNVXcV2ebMy8M,579
16
+ freealg/distributions/_kesten_mckay.py,sha256=BM_U8cX3eRstbAA4IZRK4qA_6S9zcogaXeuHyKXen14,19897
17
+ freealg/distributions/_marchenko_pastur.py,sha256=xwk40GwpLvEm9--FN7-T2NWtHTkfzcvOS4tFyrm71ww,16990
18
+ freealg/distributions/_meixner.py,sha256=8zmDnoCp-GOMnd6T2rKLQaMfn6uFmSnd-i5PLlfGOUM,17526
19
+ freealg/distributions/_wachter.py,sha256=d601xAaFSVGeK13SSDavjsJ5a-MJnI2mgzWiplX0Quk,16898
20
+ freealg/distributions/_wigner.py,sha256=w8OlZL9pSfGnXVSSB6A4KBiImr0Zz4iH2PDLCHFfpaY,15877
21
+ freealg-0.6.0.dist-info/licenses/AUTHORS.txt,sha256=0b67Nz4_JgIzUupHJTAZxu5QdSUM_HRM_X_w4xCb17o,30
22
+ freealg-0.6.0.dist-info/licenses/LICENSE.txt,sha256=J-EEYEtxb3VVf_Bn1TYfWnpY5lMFIM15iLDDcnaDTPA,1443
23
+ freealg-0.6.0.dist-info/METADATA,sha256=5iMEjHYzw0gEmpxLhLRBZMXW39OSIS-bwVK46ZtcmqY,5530
24
+ freealg-0.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
25
+ freealg-0.6.0.dist-info/top_level.txt,sha256=eR2wrgYwDdnnJ9Zf5PruPqe4kQav0GMvRsqct6y00Q8,8
26
+ freealg-0.6.0.dist-info/RECORD,,
@@ -1,26 +0,0 @@
1
- freealg/__init__.py,sha256=KfzqG7qig6ZZCKqgHcC2ApTg4rTLkrbJsvVoJd8UFG8,625
2
- freealg/__version__.py,sha256=tgzuqHKcEdKBaP57F5oXxq4XlW2n9J4Fj8ZGu7nGOZg,22
3
- freealg/_chebyshev.py,sha256=UHU9e9LyiTJcs_01h3oIbX8_QQ9wH0P8GRmA3phvWhU,6780
4
- freealg/_damp.py,sha256=k2vtBtWOxQBf4qXaWu_En81lQBXbEO4QbxxWpvuVhdE,1802
5
- freealg/_decompress.py,sha256=RKYnuLKMx1V6-XrFjYX1UkGMRbUD9Gmf9XTyS4ffPSc,31970
6
- freealg/_jacobi.py,sha256=k1zYZtvoA3_gcanqepIVSoA0RJrExtluQzDamkRz_Uk,7570
7
- freealg/_linalg.py,sha256=khfRwdQIDtf45aOIq3QSeihAEtUCL-HgwEYtEs0YELc,13137
8
- freealg/_pade.py,sha256=Ecm373RvBWznAaf8a5D9qtjcRA8O_8NQsePwto-dsP4,13656
9
- freealg/_plot_util.py,sha256=GKvmc1wjVGeqoomrULPbzBEt6P86FdoR2idBLYh5EDY,20068
10
- freealg/_sample.py,sha256=MUY9ZkJbdhbC5thoy5_JMRWOHz_OfqMfaH0g2VhKPhg,2550
11
- freealg/_series.py,sha256=MV8yDHentc06aqPuPlqrIQUNgG0-WiviD8ZWMmLWC0I,11490
12
- freealg/_support.py,sha256=ZfU0T7CCDaTXMdEHSbwOL-rMnCPvJWsRb6JtqUMsrzY,6441
13
- freealg/_util.py,sha256=xKhtPgbW4vHidNAG6F5c5aQhTaG4MmaUqCsxwjBznfU,3786
14
- freealg/freeform.py,sha256=i09DcRikLL6dlsu4JzoC1AQ7EHTyacx2fgyEz3hFakI,42432
15
- freealg/distributions/__init__.py,sha256=t_yZyEkW_W_tSV9IvgYXtVASxD2BEdiNVXcV2ebMy8M,579
16
- freealg/distributions/_kesten_mckay.py,sha256=4EvW0seBZUv-cZjuLzfVCredmrCiOJcQ_hJM0mJDl6g,19899
17
- freealg/distributions/_marchenko_pastur.py,sha256=r-jmz8Wq5vG0-x7rB0qvVemqvu0Oq6S-BN67pqH0uiw,16992
18
- freealg/distributions/_meixner.py,sha256=bZv-x4mwS4yjisKoYSZAffHXtvDEKEis-74BeAjK4_s,17530
19
- freealg/distributions/_wachter.py,sha256=rkAmrBLqD2zvpBh0F32aQBscjXv8earXDH9_BITgFF0,16900
20
- freealg/distributions/_wigner.py,sha256=w8OlZL9pSfGnXVSSB6A4KBiImr0Zz4iH2PDLCHFfpaY,15877
21
- freealg-0.5.3.dist-info/licenses/AUTHORS.txt,sha256=0b67Nz4_JgIzUupHJTAZxu5QdSUM_HRM_X_w4xCb17o,30
22
- freealg-0.5.3.dist-info/licenses/LICENSE.txt,sha256=J-EEYEtxb3VVf_Bn1TYfWnpY5lMFIM15iLDDcnaDTPA,1443
23
- freealg-0.5.3.dist-info/METADATA,sha256=p8NU7kItll4WbsBnavnrJQDOdvE5SxKsoQtEHLOL_1w,5530
24
- freealg-0.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
25
- freealg-0.5.3.dist-info/top_level.txt,sha256=eR2wrgYwDdnnJ9Zf5PruPqe4kQav0GMvRsqct6y00Q8,8
26
- freealg-0.5.3.dist-info/RECORD,,