freealg 0.6.1.tar.gz → 0.6.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {freealg-0.6.1 → freealg-0.6.2}/PKG-INFO +1 -1
  2. freealg-0.6.2/freealg/__version__.py +1 -0
  3. {freealg-0.6.1 → freealg-0.6.2}/freealg/_pade.py +33 -151
  4. {freealg-0.6.1 → freealg-0.6.2}/freealg/_sample.py +7 -2
  5. {freealg-0.6.1 → freealg-0.6.2}/freealg/_util.py +45 -13
  6. {freealg-0.6.1 → freealg-0.6.2}/freealg/distributions/_kesten_mckay.py +8 -2
  7. {freealg-0.6.1 → freealg-0.6.2}/freealg/distributions/_marchenko_pastur.py +8 -2
  8. {freealg-0.6.1 → freealg-0.6.2}/freealg/distributions/_meixner.py +28 -26
  9. {freealg-0.6.1 → freealg-0.6.2}/freealg/distributions/_wachter.py +8 -2
  10. {freealg-0.6.1 → freealg-0.6.2}/freealg/distributions/_wigner.py +8 -2
  11. {freealg-0.6.1 → freealg-0.6.2}/freealg/freeform.py +7 -6
  12. {freealg-0.6.1 → freealg-0.6.2}/freealg.egg-info/PKG-INFO +1 -1
  13. freealg-0.6.1/freealg/__version__.py +0 -1
  14. {freealg-0.6.1 → freealg-0.6.2}/AUTHORS.txt +0 -0
  15. {freealg-0.6.1 → freealg-0.6.2}/CHANGELOG.rst +0 -0
  16. {freealg-0.6.1 → freealg-0.6.2}/LICENSE.txt +0 -0
  17. {freealg-0.6.1 → freealg-0.6.2}/MANIFEST.in +0 -0
  18. {freealg-0.6.1 → freealg-0.6.2}/README.rst +0 -0
  19. {freealg-0.6.1 → freealg-0.6.2}/freealg/__init__.py +0 -0
  20. {freealg-0.6.1 → freealg-0.6.2}/freealg/_chebyshev.py +0 -0
  21. {freealg-0.6.1 → freealg-0.6.2}/freealg/_damp.py +0 -0
  22. {freealg-0.6.1 → freealg-0.6.2}/freealg/_decompress.py +0 -0
  23. {freealg-0.6.1 → freealg-0.6.2}/freealg/_jacobi.py +0 -0
  24. {freealg-0.6.1 → freealg-0.6.2}/freealg/_linalg.py +0 -0
  25. {freealg-0.6.1 → freealg-0.6.2}/freealg/_plot_util.py +0 -0
  26. {freealg-0.6.1 → freealg-0.6.2}/freealg/_series.py +0 -0
  27. {freealg-0.6.1 → freealg-0.6.2}/freealg/_support.py +0 -0
  28. {freealg-0.6.1 → freealg-0.6.2}/freealg/distributions/__init__.py +0 -0
  29. {freealg-0.6.1 → freealg-0.6.2}/freealg.egg-info/SOURCES.txt +0 -0
  30. {freealg-0.6.1 → freealg-0.6.2}/freealg.egg-info/dependency_links.txt +0 -0
  31. {freealg-0.6.1 → freealg-0.6.2}/freealg.egg-info/not-zip-safe +0 -0
  32. {freealg-0.6.1 → freealg-0.6.2}/freealg.egg-info/requires.txt +0 -0
  33. {freealg-0.6.1 → freealg-0.6.2}/freealg.egg-info/top_level.txt +0 -0
  34. {freealg-0.6.1 → freealg-0.6.2}/pyproject.toml +0 -0
  35. {freealg-0.6.1 → freealg-0.6.2}/requirements.txt +0 -0
  36. {freealg-0.6.1 → freealg-0.6.2}/setup.cfg +0 -0
  37. {freealg-0.6.1 → freealg-0.6.2}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: freealg
- Version: 0.6.1
+ Version: 0.6.2
  Summary: Free probability for large matrices
  Home-page: https://github.com/ameli/freealg
  Download-URL: https://github.com/ameli/freealg/archive/main.zip
@@ -0,0 +1 @@
+ __version__ = "0.6.2"
@@ -108,32 +108,35 @@ def _decode_poles(s, lam_m, lam_p):
  # inner ls
  # ========

- def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
+ def _inner_ls(x, f, poles, dpq=1, pade_reg=0.0):
  """
  This is the inner least square (blazing fast).
+
+ dpq is the difference between the order of P (numerator) and Q
+ (denominator).
  """

- if poles.size == 0 and p == -1:
+ if poles.size == 0 and dpq == -1:
  return 0.0, 0.0, numpy.empty(0)

  if poles.size == 0: # q = 0
  # A = numpy.column_stack((numpy.ones_like(x), x))
- cols = [numpy.ones_like(x)] if p >= 0 else []
- if p == 1:
+ cols = [numpy.ones_like(x)] if dpq >= 0 else []
+ if dpq == 1:
  cols.append(x)
  A = numpy.column_stack(cols)
  # ---
  theta, *_ = lstsq(A, f, rcond=None)
  # c, D = theta # TEST
- if p == -1:
+ if dpq == -1:
  c = 0.0
  D = 0.0
  resid = numpy.empty(0)
- elif p == 0:
+ elif dpq == 0:
  c = theta[0]
  D = 0.0
  resid = numpy.empty(0)
- else: # p == 1
+ else: # dpq == 1
  c, D = theta
  resid = numpy.empty(0)
  else:
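The refactor above renames the inner least-squares flag from "p" to "dpq", the difference between the degrees of the numerator P and the denominator Q of the [p/q] rational model; it decides whether the constant "c" and the linear term "D*x" are fitted alongside the pole residues. The following is a simplified standalone sketch of that step (not freealg's internal "_inner_ls"; the "pade_reg" regularization path is omitted):

    import numpy
    from numpy.linalg import lstsq

    def toy_inner_ls(x, f, poles, dpq=1):
        # Fit r(x) = D*x + c + sum_j resid_j / (x - pole_j) for fixed poles.
        # dpq = 1 keeps c and D, dpq = 0 keeps only c, dpq = -1 keeps neither.
        cols = [numpy.ones_like(x)] if dpq >= 0 else []
        if dpq == 1:
            cols.append(x)
        cols.append(1.0 / (x[:, None] - poles[None, :]))
        A = numpy.column_stack(cols)
        theta, *_ = lstsq(A, f, rcond=None)
        skip = max(dpq + 1, 0)             # leading entries that are not residues
        c = theta[0] if dpq >= 0 else 0.0
        D = theta[1] if dpq == 1 else 0.0
        return c, D, theta[skip:]

    x = numpy.linspace(-1.0, 1.0, 200)
    poles = numpy.array([2.0, -3.0])
    f = 0.5 + 0.2 * x + 1.0 / (x - 2.0) - 0.7 / (x + 3.0)
    print(toy_inner_ls(x, f, poles, dpq=1))   # recovers c ~ 0.5, D ~ 0.2, residues [1.0, -0.7]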
@@ -142,28 +145,28 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
  # # theta, *_ = lstsq(A, f, rcond=None)
  # # c, D, resid = theta[0], theta[1], theta[2:]
  # phi = 1.0 / (x[:, None] - poles[None, :])
- # cols = [numpy.ones_like(x)] if p >= 0 else []
- # if p == 1:
+ # cols = [numpy.ones_like(x)] if dpq >= 0 else []
+ # if dpq == 1:
  # cols.append(x)
  # cols.append(phi)
  # A = numpy.column_stack(cols)
  # theta, *_ = lstsq(A, f, rcond=None)
- # if p == -1:
+ # if dpq == -1:
  # c = 0.0
  # D = 0.0
  # resid = theta
- # elif p == 0:
+ # elif dpq == 0:
  # c = theta[0]
  # D = 0.0
  # resid = theta[1:]
- # else: # p == 1
+ # else: # dpq == 1
  # c = theta[0]
  # D = theta[1]
  # resid = theta[2:]

  phi = 1.0 / (x[:, None] - poles[None, :])
- cols = [numpy.ones_like(x)] if p >= 0 else []
- if p == 1:
+ cols = [numpy.ones_like(x)] if dpq >= 0 else []
+ if dpq == 1:
  cols.append(x)
  cols.append(phi)

@@ -179,9 +182,9 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
  # theta = numpy.linalg.solve(ATA, ATf)

  # figure out how many elements to skip
- if p == 1:
+ if dpq == 1:
  skip = 2 # skip c and D
- elif p == 0:
+ elif dpq == 0:
  skip = 1 # skip c only
  else:
  skip = 0 # all entries are residues
@@ -198,11 +201,11 @@ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
  else:
  theta, *_ = lstsq(A, f, rcond=None)

- if p == -1:
+ if dpq == -1:
  c, D, resid = 0.0, 0.0, theta
- elif p == 0:
+ elif dpq == 0:
  c, D, resid = theta[0], 0.0, theta[1:]
- else: # p == 1
+ else: # dpq == 1
  c, D, resid = theta[0], theta[1], theta[2:]

  return c, D, resid
@@ -240,7 +243,7 @@ def _eval_rational(z, c, D, poles, resid):
  # fit pade
  # ========

- def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
+ def fit_pade(x, f, lam_m, lam_p, p=2, q=2, odd_side='left', pade_reg=0.0,
  safety=1.0, max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls',
  verbose=0):
  """
@@ -251,16 +254,19 @@ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
  if not (odd_side in ['left', 'right']):
  raise ValueError('"odd_side" can only be "left" or "right".')

- if not (p in [-1, 0, 1]):
- raise ValueError('"pade_p" can only be -1, 0, or 1.')
+ # Difference between the degrees of numerator and denominator
+ dpq = p - q
+ if not (dpq in [-1, 0, 1]):
+ raise ValueError('"pade_p" and "pade_q" can only differ by "+1", ' +
+ '"0", or "-1".')

  x = numpy.asarray(x, float)
  f = numpy.asarray(f, float)

  poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)
- if q == 0 and p <= 0:
+ if q == 0 and dpq <= 0:
  # c, D, resid = _inner_ls(x, f, poles0, pade_reg=pade_reg) # TEST
- c, D, resid = _inner_ls(x, f, poles0, p, pade_reg=pade_reg)
+ c, D, resid = _inner_ls(x, f, poles0, dpq, pade_reg=pade_reg)
  pade_sol = {
  'c': c, 'D': D, 'poles': poles0, 'resid': resid,
  'outer_iters': 0
@@ -274,10 +280,10 @@ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
  # residual
  # --------

- def residual(s, p=p):
+ def residual(s, dpq=dpq):
  poles = _decode_poles(s, lam_m, lam_p)
  # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg) # TEST
- c, D, resid = _inner_ls(x, f, poles, p, pade_reg=pade_reg)
+ c, D, resid = _inner_ls(x, f, poles, dpq, pade_reg=pade_reg)
  return _eval_rational(x, c, D, poles, resid) - f

  # ----------------
@@ -324,7 +330,7 @@ def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,

  poles = _decode_poles(res.x, lam_m, lam_p)
  # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg) # TEST
- c, D, resid = _inner_ls(x, f, poles, p, pade_reg=pade_reg)
+ c, D, resid = _inner_ls(x, f, poles, dpq, pade_reg=pade_reg)

  pade_sol = {
  'c': c, 'D': D, 'poles': poles, 'resid': resid,
@@ -364,127 +370,3 @@ def eval_pade(z, pade_sol):
  for bj, rj in zip(poles, resid):
  out += rj/(z - bj) # each is an (N,) op, no N*q temp
  return out
-
-
- # ============
- # fit pade old
- # ============
-
- def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
- S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
- """
- Deprecated.
-
- Fit a [p/q] rational P/Q of the form:
- P(x) = s * prod_{i=0..p-1}(x - a_i)
- Q(x) = prod_{j=0..q-1}(x - b_j)
-
- Constraints:
- a_i in [lam_m, lam_p]
- b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
-
- Approach:
- - Brute-force all 2^q left/right assignments for denominator roots
- - Global search with differential_evolution, fallback to zeros if needed
- - Local refinement with least_squares
-
- Returns a dict with keys:
- 's' : optimal scale factor
- 'a' : array of p numerator roots (in [lam_m, lam_p])
- 'b' : array of q denominator roots (outside the interval)
- 'resid' : final residual norm
- 'signs' : tuple indicating left/right pattern for each b_j
- """
-
- # Determine finite bounds for DE
- if not numpy.isfinite(B):
- B_eff = B_default
- else:
- B_eff = B
- if not numpy.isfinite(S):
- # scale bound: S_factor * max|f| * interval width + safety
- S_eff = S_factor * numpy.max(numpy.abs(f)) * (lam_p - lam_m) + 1.0
- if S_eff <= 0:
- S_eff = 1.0
- else:
- S_eff = S
-
- def map_roots(signs, b):
- """Map unconstrained b_j -> real root outside the interval."""
- out = numpy.empty_like(b)
- for j, (s_val, bj) in enumerate(zip(signs, b)):
- if s_val > 0:
- out[j] = lam_p + delta + numpy.exp(bj)
- else:
- out[j] = lam_m - delta - numpy.exp(bj)
- return out
-
- best = {'resid': numpy.inf}
-
- # Enumerate all left/right sign patterns
- for signs in product([-1, 1], repeat=q):
- # Residual vector for current pattern
- def resid_vec(z):
- s_val = z[0]
- a = z[1:1+p]
- b = z[1+p:]
- P = s_val * numpy.prod(x[:, None] - a[None, :], axis=1)
- roots_Q = map_roots(signs, b)
- Q = numpy.prod(x[:, None] - roots_Q[None, :], axis=1)
- return P - f * Q
-
- def obj(z):
- r = resid_vec(z)
- return r.dot(r)
-
- # Build bounds for DE
- bounds = []
- bounds.append((-S_eff, S_eff)) # s
- bounds += [(lam_m, lam_p)] * p # a_i
- bounds += [(-B_eff, B_eff)] * q # b_j
-
- # 1) Global search
- try:
- de = differential_evolution(obj, bounds,
- maxiter=maxiter_de,
- polish=False)
- z0 = de.x
- except ValueError:
- # fallback: start at zeros
- z0 = numpy.zeros(1 + p + q)
-
- # 2) Local refinement
- ls = least_squares(resid_vec, z0, xtol=1e-12, ftol=1e-12)
-
- rnorm = numpy.linalg.norm(resid_vec(ls.x))
- if rnorm < best['resid']:
- best.update(resid=rnorm, signs=signs, x=ls.x.copy())
-
- # Unpack best solution
- z_best = best['x']
- s_opt = z_best[0]
- a_opt = z_best[1:1+p]
- b_opt = map_roots(best['signs'], z_best[1+p:])
-
- return {
- 's': s_opt,
- 'a': a_opt,
- 'b': b_opt,
- 'resid': best['resid'],
- 'signs': best['signs'],
- }
-
-
- # =============
- # eval pade old
- # =============
-
- def eval_pade_old(z, s, a, b):
- """
- Deprecated.
- """
-
- Pz = s * numpy.prod([z - aj for aj in a], axis=0)
- Qz = numpy.prod([z - bj for bj in b], axis=0)
-
- return Pz / Qz
@@ -113,9 +113,14 @@ def sample(x, rho, num_pts, method='qmc', seed=None):
  # Draw from uniform distribution
  if method == 'mc':
  u = rng.random(num_pts)
+
  elif method == 'qmc':
- engine = qmc.Halton(d=1, rng=rng)
- u = engine.random(num_pts)
+ try:
+ engine = qmc.Halton(d=1, scramble=True, rng=rng)
+ except TypeError:
+ engine = qmc.Halton(d=1, scramble=True, seed=rng)
+ u = engine.random(num_pts).ravel()
+
  else:
  raise NotImplementedError('"method" is invalid.')

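The try/except added above accommodates SciPy's rename of the QMC random-state keyword from "seed" to "rng": newer SciPy releases accept "rng", while older ones raise TypeError and only accept "seed". A minimal standalone sketch of the same compatibility pattern (the helper name "make_halton" is illustrative, not part of freealg):

    import numpy
    from scipy.stats import qmc

    def make_halton(rng):
        """Return a 1-D scrambled Halton engine on both old and new SciPy."""
        try:
            # SciPy versions that accept the new "rng" keyword
            return qmc.Halton(d=1, scramble=True, rng=rng)
        except TypeError:
            # Older SciPy versions only know the "seed" keyword
            return qmc.Halton(d=1, scramble=True, seed=rng)

    rng = numpy.random.default_rng(0)
    u = make_halton(rng).random(64).ravel()   # flatten the (n, 1) output to (n,)
    print(u.shape, u.min(), u.max())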
@@ -126,6 +126,27 @@ def kde(eig, xs, lam_m, lam_p, h, kernel='beta', plot=False):

  freealg.supp
  freealg.sample
+
+ Notes
+ -----
+
+ In Beta kernel density estimation, the shape parameters "a" and "b" of the
+ Beta(a, b) distribution are computed for each data point "u" as:
+
+ a = (u / h) + 1.0
+ b = ((1.0 - u) / h) + 1.0
+
+ This is a standard way of using the Beta kernel (see the R-package
+ documentation: https://search.r-project.org/CRAN/refmans/DELTD/html/Beta.html).
+
+ These equations are derived from the "moment matching" method, where
+
+ Mean(Beta(a, b)) = u
+ Var(Beta(a, b)) = (1 - u) u h
+
+ Solving these two equations for "a" and "b" yields the relations above.
+ See the paper (page 134):
+ https://www.songxichen.com/Uploads/Files/Publication/Chen-CSD-99.pdf
  """

  if kernel == 'gaussian':
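The moment-matching claim in the Notes can be checked numerically: with a = u/h + 1 and b = (1 - u)/h + 1, the Beta(a, b) kernel has mean close to u and variance close to u(1 - u)h, the match becoming exact only as h tends to zero. A quick check (illustrative, not part of freealg):

    from scipy.stats import beta

    u, h = 0.3, 0.01
    a = (u / h) + 1.0
    b = ((1.0 - u) / h) + 1.0
    mean, var = beta.stats(a, b, moments='mv')
    print(float(mean), u)                  # ~0.30
    print(float(var), u * (1.0 - u) * h)   # ~0.0021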
@@ -141,28 +162,39 @@ def kde(eig, xs, lam_m, lam_p, h, kernel='beta', plot=False):

  span = lam_p - lam_m
  if span <= 0:
- raise ValueError("lam_p must be larger than lam_m")
+ raise ValueError('"lam_p" must be larger than "lam_m".')

  # map samples and grid to [0, 1]
  u = (eig - lam_m) / span
  t = (xs - lam_m) / span

- if u.min() < 0 or u.max() > 1:
- mask = (u > 0) & (u < 1)
- u = u[mask]
+ # keep only samples strictly inside (0, 1)
+ if (u.min() < 0) or (u.max() > 1):
+ u = u[(u > 0) & (u < 1)]
+
+ n = u.size
+ if n == 0:
+ return numpy.zeros_like(xs, dtype=float)

- pdf = numpy.zeros_like(xs, dtype=float)
- n = len(u)
+ # Shape parameters "a" and "b" of the kernel Beta(a, b), which are
+ # computed for each data point "u" (see notes above). These are
+ # vectorized.
+ a = (u / h) + 1.0
+ b = ((1.0 - u) / h) + 1.0

- # tiny positive number to keep shape parameters > 0
+ # # tiny positive number to keep shape parameters > 0
  eps = 1e-6
- for ui in u:
- a = max(ui / h + 1.0, eps)
- b = max((1.0 - ui) / h + 1.0, eps)
- pdf += beta.pdf(t, a, b)
+ a = numpy.clip(a, eps, None)
+ b = numpy.clip(b, eps, None)
+
+ # Beta kernel
+ pdf_matrix = beta.pdf(t[None, :], a[:, None], b[:, None])
+
+ # Average and re-normalize back to x variable
+ pdf = pdf_matrix.sum(axis=0) / (n * span)

- pdf /= n * span # renormalise
- pdf[(t < 0) | (t > 1)] = 0.0 # exact zeros outside
+ # Exact zeros outside [lam_m, lam_p]
+ pdf[(t < 0) | (t > 1)] = 0.0

  else:
  raise NotImplementedError('"kernel" is invalid.')
@@ -526,9 +526,15 @@ class KestenMcKay(object):
  # Draw from uniform distribution
  if method == 'mc':
  u = rng.random(size)
+
  elif method == 'qmc':
- engine = qmc.Halton(d=1, rng=rng)
- u = engine.random(size)
+ try:
+ engine = qmc.Halton(d=1, scramble=True, rng=rng)
+ except TypeError:
+ # Older scipy versions
+ engine = qmc.Halton(d=1, scramble=True, seed=rng)
+ u = engine.random(size).ravel()
+
  else:
  raise NotImplementedError('"method" is invalid.')

@@ -533,9 +533,15 @@ class MarchenkoPastur(object):
  # Draw from uniform distribution
  if method == 'mc':
  u = rng.random(size)
+
  elif method == 'qmc':
- engine = qmc.Halton(d=1, rng=rng)
- u = engine.random(size)
+ try:
+ engine = qmc.Halton(d=1, scramble=True, rng=rng)
+ except TypeError:
+ # Older scipy versions
+ engine = qmc.Halton(d=1, scramble=True, seed=rng)
+ u = engine.random(size).ravel()
+
  else:
  raise NotImplementedError('"method" is invalid.')

@@ -177,18 +177,12 @@ class Meixner(object):
  rho = numpy.zeros_like(x)
  mask = numpy.logical_and(x > self.lam_m, x < self.lam_p)

- # rho[mask] = \
- # numpy.sqrt(4.0 * (1.0 + self.b) - (x[mask] - self.a)**2) / \
- # (2.0 * numpy.pi * (self.b * x[mask]**2 + self.a * x[mask] + 1))
-
  numer = numpy.zeros_like(x)
  denom = numpy.ones_like(x)
  numer[mask] = self.c * numpy.sqrt(4.0 * self.b - (x[mask] - self.a)**2)
- denom[mask] = (1 - self.c)*(x[mask] - self.a)**2
- denom[mask] += self.a * (2 - self.c)*(x[mask] - self.a)
- denom[mask] += self.a**2 + self.b * self.c**2
- denom[mask] *= 2 * numpy.pi
-
+ denom[mask] = 2.0 * numpy.pi * (
+ (1.0 - self.c) * x[mask]**2 + self.a * self.c * x[mask] +
+ self.b * self.c**2)
  rho[mask] = numer[mask] / denom[mask]

  if plot:
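For reference, the density implemented by the rewritten block above reads, writing a, b, c for self.a, self.b, self.c and for x inside the support (lam_m, lam_p) (transcribed from the code; rho is zero outside the support):

    \rho(x) = \frac{c \, \sqrt{4 b - (x - a)^{2}}}
                   {2 \pi \left[ (1 - c) \, x^{2} + a c \, x + b c^{2} \right]}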
@@ -260,14 +254,14 @@ class Meixner(object):
  def _P(x):
  # denom = 1.0 + self.b
  # return ((1.0 + 2.0 * self.b) * x + self.a) / denom
- P = ((self.c - 2.0) * x - self.a * self.c) / 2.0
+ P = (self.c - 2.0) * x - self.a * self.c
  return P

  def _Q(x):
  # denom = 1.0 + self.b
  # return (self.b * x**2 + self.a * x + 1.0) / denom
- Q = ((1.0 - self.c) * x**2 + self.a * self.c * x +
- self.b * self.c**2) / 4.0
+ Q = (1.0 - self.c) * x**2 + self.a * self.c * x + \
+ self.b * self.c**2
  return Q

  P = _P(x)
@@ -277,9 +271,6 @@ class Meixner(object):
  sign = numpy.sign(P)
  hilb = (P - sign * Delta) / (2.0 * Q)

- # using negative sign convention
- hilb = -hilb
-
  if plot:
  plot_hilbert(x, hilb, support=self.support, latex=latex, save=save)

@@ -299,21 +290,26 @@ class Meixner(object):
  # denom = 1.0 + self.b
  # A = (self.b * z**2 + self.a * z + 1.0) / denom
  # B = ((1.0 + 2.0 * self.b) * z + self.a) / denom
- A = ((1.0 - self.c) * z**2 + self.a * self.c * z +
- self.b * self.c**2) / 4.0
- B = ((self.c - 2.0) * z - self.a * self.c) / 2.0
+ # A = ((1.0 - self.c) * z**2 + self.a * self.c * z +
+ # self.b * self.c**2) / 4.0
+ # B = ((self.c - 2.0) * z - self.a * self.c) / 2.0
+
+ Q = (1.0 - self.c) * z**2 + self.a * self.c * z + \
+ self.b * self.c**2
+ P = (self.c - 2.0) * z - self.a * self.c

  # D = B**2 - 4 * A
  # sqrtD = numpy.sqrt(D)

  # Avoid numpy picking the wrong branch
- d = 2 * numpy.sqrt(1.0 + self.b)
- r_min = self.a - d
- r_max = self.a + d
- sqrtD = numpy.sqrt(z - r_min) * numpy.sqrt(z - r_max)
+ # d = 2 * numpy.sqrt(1.0 + self.b)
+ # r_min = self.a - d
+ # r_max = self.a + d
+ # sqrtD = numpy.sqrt(z - r_min) * numpy.sqrt(z - r_max)
+ sqrtD = numpy.sqrt(P**2 - 4.0 * Q)

- m1 = (-B + sqrtD) / (2 * A)
- m2 = (-B - sqrtD) / (2 * A)
+ m1 = (P + sqrtD) / (2 * Q)
+ m2 = (P - sqrtD) / (2 * Q)

  # pick correct branch only for non-masked entries
  upper = z.imag >= 0
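The two branch values above are exactly the roots of the quadratic Q(z) m^2 - P(z) m + 1 = 0, written out with the quadratic formula and discriminant P^2 - 4Q; the lines that follow then pick the branch according to the half-plane of z. A small numerical check (illustrative, not part of freealg):

    import numpy

    rng = numpy.random.default_rng(1)
    P = rng.normal() + 1j * rng.normal()
    Q = rng.normal() + 1j * rng.normal()
    sqrtD = numpy.sqrt(P**2 - 4.0 * Q)
    for m in ((P + sqrtD) / (2 * Q), (P - sqrtD) / (2 * Q)):
        print(abs(Q * m**2 - P * m + 1.0))   # ~0, up to floating-point error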
@@ -558,9 +554,15 @@ class Meixner(object):
  # Draw from uniform distribution
  if method == 'mc':
  u = rng.random(size)
+
  elif method == 'qmc':
- engine = qmc.Halton(d=1, rng=rng)
- u = engine.random(size)
+ try:
+ engine = qmc.Halton(d=1, scramble=True, rng=rng)
+ except TypeError:
+ # Older scipy versions
+ engine = qmc.Halton(d=1, scramble=True, seed=rng)
+ u = engine.random(size).ravel()
+
  else:
  raise NotImplementedError('"method" is invalid.')

@@ -533,9 +533,15 @@ class Wachter(object):
  # Draw from uniform distribution
  if method == 'mc':
  u = rng.random(size)
+
  elif method == 'qmc':
- engine = qmc.Halton(d=1, rng=rng)
- u = engine.random(size)
+ try:
+ engine = qmc.Halton(d=1, scramble=True, rng=rng)
+ except TypeError:
+ # Older scipy versions
+ engine = qmc.Halton(d=1, scramble=True, seed=rng)
+ u = engine.random(size).ravel()
+
  else:
  raise NotImplementedError('"method" is invalid.')

@@ -510,9 +510,15 @@ class Wigner(object):
  # Draw from uniform distribution
  if method == 'mc':
  u = rng.random(size)
+
  elif method == 'qmc':
- engine = qmc.Halton(d=1, rng=rng)
- u = engine.random(size)
+ try:
+ engine = qmc.Halton(d=1, scramble=True, rng=rng)
+ except TypeError:
+ # Older scipy versions
+ engine = qmc.Halton(d=1, scramble=True, seed=rng)
+ u = engine.random(size).ravel()
+
  else:
  raise NotImplementedError('"method" is invalid.')

@@ -200,7 +200,7 @@ class FreeForm(object):

  def fit(self, method='jacobi', K=10, alpha=0.0, beta=0.0, n_quad=60,
  reg=0.0, projection='gaussian', kernel_bw=0.001, damp=None,
- force=False, continuation='pade', pade_p=0, pade_q=1,
+ force=False, continuation='pade', pade_p=1, pade_q=1,
  odd_side='left', pade_reg=0.0, optimizer='ls', plot=False,
  latex=False, save=False):
  """
@@ -275,14 +275,15 @@ class FreeForm(object):
  * ``'brezinski'``: Brezinski's :math:`\\theta` algorithm
  (`experimental`).

- pade_p : int, default=0
- Degree of polynomial :math:`P(z)` is :math:`q+p` where :math:`p`
- can only be ``-1``, ``0``, or ``1``. See notes below. This option
+ pade_p : int, default=1
+ Degree of polynomial :math:`P(z)` is :math:`p` where :math:`p` can
+ only be ``q-1``, ``q``, or ``q+1``. See notes below. This option
  is applicable if ``continuation='pade'``.

  pade_q : int, default=1
- Degree of polynomial :math:`Q(z)` is :math:`q`. See notes below.
- This option is applicable if ``continuation='pade'``.
+ Degree of polynomial :math:`Q(z)` is :math:`q` where :math:`q` can
+ only be ``p-1``, ``p``, or ``p+1``. See notes below. This option
+ is applicable if ``continuation='pade'``.

  odd_side : {``'left'``, ``'right'``}, default= ``'left'``
  In case of odd number of poles (when :math:`q` is odd), the extra
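The documented constraint mirrors the new check in fit_pade (see the freealg/_pade.py hunks above): the degrees of P and Q may differ by at most one. A distilled standalone sketch of that check (the helper "check_pade_degrees" is illustrative, not a freealg function):

    def check_pade_degrees(p, q):
        # Degrees of P and Q in the [p/q] Pade model may differ by at most one.
        dpq = p - q
        if dpq not in (-1, 0, 1):
            raise ValueError('"pade_p" and "pade_q" can only differ by "+1", '
                             '"0", or "-1".')
        return dpq

    print(check_pade_degrees(1, 1))    # 0, the new default pair
    print(check_pade_degrees(2, 1))    # 1
    try:
        check_pade_degrees(0, 2)       # differs by 2
    except ValueError as err:
        print(err)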
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: freealg
- Version: 0.6.1
+ Version: 0.6.2
  Summary: Free probability for large matrices
  Home-page: https://github.com/ameli/freealg
  Download-URL: https://github.com/ameli/freealg/archive/main.zip
@@ -1 +0,0 @@
- __version__ = "0.6.1"