freealg 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
freealg/_linalg.py CHANGED
@@ -21,19 +21,20 @@ __all__ = ['eigvalsh', 'cond', 'norm', 'trace', 'slogdet']
21
21
  # subsample apply
22
22
  # ===============
23
23
 
24
- def _subsample_apply(f, A, output_array=False):
24
+ def _subsample_apply(f, A, output_array=False, seed=None):
25
25
  """
26
26
  Compute f(A_n) over subsamples A_n of A. If the output of
27
27
  f is an array (e.g. eigvals), specify output_array to be True.
28
28
  """
29
29
 
30
- if A.ndim != 2 or A.shape[0] != A.shape[1]:
30
+ if (A.ndim != 2) or (A.shape[0] != A.shape[1]):
31
31
  raise RuntimeError("Only square matrices are permitted.")
32
32
 
33
33
  n = A.shape[0]
34
34
 
35
35
  # Size of sample matrix
36
- n_s = int(80*(1 + numpy.log(n)))
36
+ n_s = int(80.0 * (1.0 + numpy.log(n)))
37
+
37
38
  # If matrix is not large enough, return eigenvalues
38
39
  if n < n_s:
39
40
  return f(A), n, n
@@ -43,14 +44,15 @@ def _subsample_apply(f, A, output_array=False):
43
44
 
44
45
  # Collect eigenvalue samples
45
46
  samples = []
47
+ rng = numpy.random.default_rng(seed=seed)
46
48
  for _ in range(num_samples):
47
- indices = numpy.random.choice(n, n_s, replace=False)
49
+ indices = rng.choice(n, n_s, replace=False)
48
50
  samples.append(f(A[numpy.ix_(indices, indices)]))
49
51
 
50
52
  if output_array:
51
- return numpy.concatenate(samples).ravel(), n, n_s
52
-
53
- return numpy.array(samples), n, n_s
53
+ return numpy.concatenate(samples), n, n_s
54
+ else:
55
+ return numpy.array(samples), n, n_s
54
56
 
55
57
 
56
58
  # ========
@@ -83,7 +85,8 @@ def eigvalsh(A, size=None, psd=None, seed=None, plot=False, **kwargs):
83
85
  if all sampled eigenvalues are positive.
84
86
 
85
87
  seed : int, default=None
86
- The seed for the Quasi-Monte Carlo sampler.
88
 + The seed for sampling rows/columns of the matrix as well as the Quasi-Monte
89
+ Carlo sampler for eigenvalues from density.
87
90
 
88
91
  plot : bool, default=False
89
92
  Print out all relevant plots for diagnosing eigenvalue accuracy.
@@ -130,7 +133,8 @@ def eigvalsh(A, size=None, psd=None, seed=None, plot=False, **kwargs):
130
133
  >>> eigs = eigvalsh(A)
131
134
  """
132
135
 
133
- samples, n, n_s = _subsample_apply(compute_eig, A, output_array=True)
136
+ samples, n, n_s = _subsample_apply(compute_eig, A, output_array=True,
137
+ seed=seed)
134
138
 
135
139
  if size is None:
136
140
  size = n
@@ -140,18 +144,28 @@ def eigvalsh(A, size=None, psd=None, seed=None, plot=False, **kwargs):
140
144
  psd = samples.min() > 0
141
145
 
142
146
  ff = FreeForm(samples)
147
+
143
148
  # Since we are resampling, we need to provide the correct matrix size
144
149
  ff.n = n_s
145
150
 
146
151
  # Perform fit and estimate eigenvalues
147
152
  order = 1 + int(len(samples)**0.2)
148
153
  ff.fit(method='chebyshev', K=order, projection='sample',
149
- force=True, plot=False, latex=False, save=False)
154
+ continuation='wynn', force=True, plot=False, latex=False,
155
+ save=False)
150
156
 
151
157
  if plot:
152
158
  ff.density(plot=True)
153
159
  ff.stieltjes(plot=True)
154
160
 
161
+ # Sampling method using Pade seems to need a lower tolerance to properly
162
+ # work. Here we set defaults unless user provides otherwise. Note that the
163
+ # default of tolerance in ff._decompress is much larger (1e-4) for other
164
+ # methods (Newton, and non-sampling projections such as Gaussian and beta)
165
+ # to work properly.
166
+ kwargs.setdefault('tolerance', 1e-9)
167
+ kwargs.setdefault('method', 'secant')
168
+
155
169
  eigs = ff.eigvalsh(size, seed=seed, plot=plot, **kwargs)
156
170
 
157
171
  if psd:
freealg/_pade.py CHANGED
@@ -12,7 +12,6 @@
12
12
  # =======
13
13
 
14
14
  import numpy
15
- import numba
16
15
  from numpy.linalg import lstsq
17
16
  from itertools import product
18
17
  from scipy.optimize import least_squares, differential_evolution
@@ -237,64 +236,6 @@ def _eval_rational(z, c, D, poles, resid):
237
236
  return c + D * z + term
238
237
 
239
238
 
240
- # =========
241
- # Wynn pade
242
- # =========
243
-
244
- @numba.jit(nopython=True, parallel=True)
245
- def wynn_pade(coeffs, x):
246
- """
247
- Given the coefficients of a power series
248
- f(x) = sum_{n=0}^∞ coeffs[n] * x^n,
249
- returns a function handle that computes the Pade approximant at any x
250
- using Wynn's epsilon algorithm.
251
-
252
- Parameters
253
- ----------
254
-
255
- coeffs (list or array):
256
- Coefficients [a0, a1, a2, ...] of the power series.
257
-
258
- Returns
259
- -------
260
-
261
- function:
262
- A function approximant(x) that returns the approximated value f(x).
263
- """
264
-
265
- # Number of coefficients
266
- xn = x.ravel()
267
- d = len(xn)
268
- N = len(coeffs)
269
-
270
- # Compute the partial sums s_n = sum_{i=0}^n a_i * x^i for n=0,...,N-1
271
- eps = numpy.zeros((N+1, N, d), dtype=numpy.complex128)
272
- for i in numba.prange(d):
273
- partial_sum = 0.0
274
- for n in range(N):
275
- partial_sum += coeffs[n] * (xn[i] ** n)
276
- eps[0, n, i] = partial_sum
277
-
278
- for i in numba.prange(d):
279
- for k in range(1, N+1):
280
- for j in range(N - k):
281
- delta = eps[k-1, j+1, i] - eps[k-1, j, i]
282
- if delta == 0:
283
- rec_delta = numpy.inf
284
- elif numpy.isinf(delta) or numpy.isnan(delta):
285
- rec_delta = 0.0
286
- else:
287
- rec_delta = 1.0 / delta
288
- eps[k, j, i] = rec_delta
289
- if k > 1:
290
- eps[k, j, i] += eps[k-2, j+1, i]
291
-
292
- if (N % 2) == 0:
293
- N -= 1
294
-
295
- return eps[N-1, 0, :].reshape(x.shape)
296
-
297
-
298
239
  # ========
299
240
  # fit pade
300
241
  # ========
@@ -432,6 +373,8 @@ def eval_pade(z, pade_sol):
432
373
  def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
433
374
  S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
434
375
  """
376
+ Deprecated.
377
+
435
378
  Fit a [p/q] rational P/Q of the form:
436
379
  P(x) = s * prod_{i=0..p-1}(x - a_i)
437
380
  Q(x) = prod_{j=0..q-1}(x - b_j)
@@ -538,6 +481,7 @@ def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
538
481
 
539
482
  def eval_pade_old(z, s, a, b):
540
483
  """
484
+ Deprecated.
541
485
  """
542
486
 
543
487
  Pz = s * numpy.prod([z - aj for aj in a], axis=0)
freealg/_plot_util.py CHANGED
@@ -34,7 +34,17 @@ def plot_fit(psi, x_supp, g_supp, g_supp_approx, support, latex=False,
34
34
 
35
35
  with texplot.theme(use_latex=latex):
36
36
 
37
- fig, ax = plt.subplots(figsize=(9, 3), ncols=2)
37
+ if g_supp is None:
38
+ figsize = (4.5, 3)
39
+ ncols = 1
40
+ else:
41
+ figsize = (9, 3)
42
+ ncols = 2
43
+
44
+ fig, ax = plt.subplots(figsize=figsize, ncols=ncols)
45
+
46
+ if g_supp is None:
47
+ ax = [ax]
38
48
 
39
49
  # Plot psi
40
50
  n = numpy.arange(1, 1+psi.size)
@@ -46,22 +56,24 @@ def plot_fit(psi, x_supp, g_supp, g_supp_approx, support, latex=False,
46
56
  ax[0].set_yscale('log')
47
57
 
48
58
  # Plot pade
49
- lam_m, lam_p = support
50
- g_supp_min = numpy.min(g_supp)
51
- g_supp_max = numpy.max(g_supp)
52
- g_supp_dif = g_supp_max - g_supp_min
53
- g_min = g_supp_min - g_supp_dif * 1.1
54
- g_max = g_supp_max + g_supp_dif * 1.1
55
-
56
- ax[1].plot(x_supp, g_supp, color='firebrick',
57
- label=r'$2 \pi \times $ Hilbert Transform')
58
- ax[1].plot(x_supp, g_supp_approx, color='black', label='Pade estimate')
59
- ax[1].legend(fontsize='small')
60
- ax[1].set_xlim([lam_m, lam_p])
61
- ax[1].set_ylim([g_min, g_max])
62
- ax[1].set_title('Approximation of Glue Function')
63
- ax[1].set_xlabel(r'$x$')
64
- ax[1].set_ylabel(r'$G(x)$')
59
+ if g_supp is not None:
60
+ lam_m, lam_p = support
61
+ g_supp_min = numpy.min(g_supp)
62
+ g_supp_max = numpy.max(g_supp)
63
+ g_supp_dif = g_supp_max - g_supp_min
64
+ g_min = g_supp_min - g_supp_dif * 1.1
65
+ g_max = g_supp_max + g_supp_dif * 1.1
66
+
67
+ ax[1].plot(x_supp, g_supp, color='firebrick',
68
+ label=r'$2 \pi \times $ Hilbert Transform')
69
+ ax[1].plot(x_supp, g_supp_approx, color='black',
70
+ label='Pade estimate')
71
+ ax[1].legend(fontsize='small')
72
+ ax[1].set_xlim([lam_m, lam_p])
73
+ ax[1].set_ylim([g_min, g_max])
74
+ ax[1].set_title('Approximation of Glue Function')
75
+ ax[1].set_xlabel(r'$x$')
76
+ ax[1].set_ylabel(r'$G(x)$')
65
77
 
66
78
  plt.tight_layout()
67
79
 
@@ -129,7 +141,7 @@ def _auto_bins(array, method='scott', factor=5):
129
141
  num_bins = int(numpy.ceil(numpy.log2(len(array)) + 1))
130
142
 
131
143
  else:
132
- raise ValueError('"method" is invalid.')
144
+ raise NotImplementedError('"method" is invalid.')
133
145
 
134
146
  return num_bins * factor
135
147
 
freealg/_sample.py CHANGED
@@ -27,10 +27,12 @@ def _quantile_func(x, rho, clamp=1e-4, eps=1e-8):
27
27
  Construct a quantile function from evaluations of an estimated density
28
28
  on a grid (x, rho(x)).
29
29
  """
30
+
30
31
  rho_clamp = rho.copy()
31
32
  rho_clamp[rho < clamp] = eps
32
33
  cdf = cumulative_trapezoid(rho_clamp, x, initial=0)
33
34
  cdf /= cdf[-1]
35
+
34
36
  return PchipInterpolator(cdf, x, extrapolate=False)
35
37
 
36
38
 
@@ -45,6 +47,7 @@ def qmc_sample(x, rho, num_pts, seed=None):
45
47
 
46
48
  Parameters
47
49
  ----------
50
+
48
51
  x : numpy.array, shape (n,)
49
52
  Sorted abscissae at which the density has been evaluated.
50
53
 
@@ -72,23 +75,27 @@ def qmc_sample(x, rho, num_pts, seed=None):
72
75
 
73
76
  Examples
74
77
  --------
78
+
75
79
  .. code-block:: python
76
80
 
77
81
  >>> import numpy
78
- >>> from your_module import qmc_sample
82
+ >>> from freealg import qmc_sample
83
+
84
+ >>> # density of Beta(3,1) on [0,1]
79
85
  >>> x = numpy.linspace(0, 1, 200)
80
- >>> rho = 3 * x**2 # density of Beta(3,1) on [0,1]
86
+ >>> rho = 3 * x**2
87
+
81
88
  >>> samples = qmc_sample(x, rho, num_pts=1000)
82
89
  >>> assert samples.shape == (1000,)
90
+
83
91
  >>> # Empirical mean should be close to 3/4
84
92
  >>> numpy.allclose(samples.mean(), 0.75, atol=0.02)
85
93
  """
86
94
 
87
- if seed is not None:
88
- numpy.random.rand(seed)
89
-
95
+ rng = numpy.random.default_rng(seed)
90
96
  quantile = _quantile_func(x, rho)
91
- engine = qmc.Halton(d=1)
97
+ engine = qmc.Halton(d=1, rng=rng)
92
98
  u = engine.random(num_pts)
93
99
  samples = quantile(u)
100
+
94
101
  return samples.ravel()
freealg/_series.py ADDED
@@ -0,0 +1,123 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # SPDX-FileType: SOURCE
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify it under
5
+ # the terms of the license found in the LICENSE.txt file in the root directory
6
+ # of this source tree.
7
+
8
+
9
+ # =======
10
+ # Imports
11
+ # =======
12
+
13
+ import numpy
14
+
15
+ __all__ = ['partial_sum', 'wynn_epsilon']
16
+
17
+
18
+ # ===========
19
+ # partial sum
20
+ # ===========
21
+
22
+ def partial_sum(coeffs, x):
23
+ """
24
+ Compute partial sum:
25
+
26
+ .. math::
27
+
28
 + S_n(x) = \\sum_{k=0}^{n} coeffs[k] * x^k, \\quad n = 0, \\ldots, N-1.
29
+
30
+ Parameters
31
+ ----------
32
+
33
+ coeffs : array_like
34
+ Coefficients [a0, a1, a2, ...] of the power series of the size N.
35
+
36
+ x : numpy.array
37
+ A flattened array of the size d.
38
+
39
+ Returns
40
+ -------
41
+
42
+ Sn : numpy.ndarray
43
+ Partial sums of the size (N, d), where the n-th row is the n-th
44
+ partial sum.
45
+ """
46
+
47
+ xn = x.ravel()
48
+ N = len(coeffs)
49
+ d = xn.size
50
+
51
+ # Forming partial sum via Horner method
52
+ Sn = numpy.zeros((N, d), dtype=x.dtype)
53
+ sum_ = numpy.zeros((d,), dtype=x.dtype)
54
+ pow_x = numpy.ones((d,), dtype=x.dtype)
55
+
56
+ for n in range(N):
57
+ sum_ += coeffs[n] * pow_x
58
+ Sn[n, :] = sum_
59
+
60
+ if n < N-1:
61
+ pow_x *= xn
62
+
63
+ return Sn
64
+
65
+
66
+ # ============
67
+ # wynn epsilon
68
+ # ============
69
+
70
+ def wynn_epsilon(Sn):
71
+ """
72
 + Accelerate convergence of a series using Wynn's epsilon algorithm.
73
+
74
+ Parameters
75
+ ----------
76
+
77
+ Sn : numpy.ndarray
78
+ A 2D array of the size (N, d), where N is the number of partial sums
79
+ and d is the vector size.
80
+
81
+ Returns
82
+ -------
83
+
84
+ S : numpy.array
85
+ A 1D array of the size (d,) which is the accelerated value of the
86
+ series at each vector element.
87
+ """
88
+
89
+ # N: number of partial sums, d: vector size
90
+ N, d = Sn.shape
91
+
92
+ eps = numpy.zeros((N+1, N, d), dtype=Sn.dtype)
93
+ eps[0, :, :] = Sn
94
+
95
+ tol = numpy.finfo(float).eps
96
+
97
+ # Wynn's epsilon algorithm
98
+ for k in range(1, N+1):
99
+ Nk = N - k
100
+
101
+ delta = eps[k-1, 1:N-k+1, :] - eps[k-1, :Nk, :]
102
+
103
+ # Reciprocal of delta
104
+ rec_delta = numpy.empty_like(delta)
105
+
106
+ # Avoid division by zero error
107
+ mask_inf = numpy.abs(delta) < tol
108
+ rec_delta[mask_inf] = numpy.inf
109
+ rec_delta[~mask_inf] = 1.0 / delta[~mask_inf]
110
+
111
+ mask_zero = numpy.logical_or(numpy.isinf(delta),
112
+ numpy.isnan(delta))
113
+ rec_delta[mask_zero] = 0.0
114
+
115
+ eps[k, :Nk, :] = rec_delta
116
+
117
+ if k > 1:
118
+ eps[k, :Nk, :] += eps[k-2, 1:Nk+1, :]
119
+
120
+ k_even = 2 * (N // 2)
121
+ series = eps[k_even, 0, :]
122
+
123
+ return series
freealg/_support.py CHANGED
@@ -109,6 +109,7 @@ def detect_support(eigs, method='asymp', k=None, p=0.001, **kwargs):
109
109
 
110
110
  Parameters
111
111
  ----------
112
+
112
113
  method : {``'range'``, ``'asymp'``, ``'jackknife'``, ``'regression'``,
113
114
  ``'interior'``, ``'interior_smooth'``}, \
114
115
  default= ``'asymp'``
@@ -146,6 +147,7 @@ def detect_support(eigs, method='asymp', k=None, p=0.001, **kwargs):
146
147
  time-series. In Mathematical Proceedings of the Cambridge
147
148
  Philosophical Society (Vol. 45, No. 3, pp. 483-484). Cambridge
148
149
  University Press.
150
+
149
151
  .. [2] Cuevas, A., & Fraiman, R. (1997). A plug-in approach to support
150
152
  estimation. The Annals of Statistics, 2300-2312.
151
153
  """
freealg/_util.py CHANGED
@@ -61,7 +61,7 @@ def beta_kde(eig, xs, lam_m, lam_p, h):
61
61
  if span <= 0:
62
62
  raise ValueError("lam_p must be larger than lam_m")
63
63
 
64
- # map samples and grid to [0,1]
64
+ # map samples and grid to [0, 1]
65
65
  u = (eig - lam_m) / span
66
66
  t = (xs - lam_m) / span
67
67
 
@@ -502,9 +502,6 @@ class KestenMcKay(object):
502
502
  :class: custom-dark
503
503
  """
504
504
 
505
- if seed is not None:
506
- numpy.random.seed(seed)
507
-
508
505
  if x_min is None:
509
506
  x_min = self.lam_m
510
507
 
@@ -523,14 +520,17 @@ class KestenMcKay(object):
523
520
  inv_cdf = interp1d(cdf, xs, bounds_error=False,
524
521
  fill_value=(x_min, x_max))
525
522
 
523
+ # Random generator
524
+ rng = numpy.random.default_rng(seed)
525
+
526
526
  # Draw from uniform distribution
527
527
  if method == 'mc':
528
- u = numpy.random.rand(size)
528
+ u = rng.random(size)
529
529
  elif method == 'qmc':
530
- engine = qmc.Halton(d=1)
530
+ engine = qmc.Halton(d=1, rng=rng)
531
531
  u = engine.random(size)
532
532
  else:
533
- raise ValueError('"method" is invalid.')
533
+ raise NotImplementedError('"method" is invalid.')
534
534
 
535
535
  # Draw from distribution by mapping from inverse CDF
536
536
  samples = inv_cdf(u).ravel()
@@ -268,7 +268,7 @@ class MarchenkoPastur(object):
268
268
  def _m_mp_numeric_vectorized(self, z, alt_branch=False, tol=1e-8):
269
269
  """
270
270
  Stieltjes transform (principal or secondary branch)
271
- for MarchenkoPastur distribution on upper half-plane.
271
+ for Marchenko-Pastur distribution on upper half-plane.
272
272
  """
273
273
 
274
274
  sigma = 1.0
@@ -509,9 +509,6 @@ class MarchenkoPastur(object):
509
509
  :class: custom-dark
510
510
  """
511
511
 
512
- if seed is not None:
513
- numpy.random.seed(seed)
514
-
515
512
  if x_min is None:
516
513
  x_min = self.lam_m
517
514
 
@@ -530,14 +527,17 @@ class MarchenkoPastur(object):
530
527
  inv_cdf = interp1d(cdf, xs, bounds_error=False,
531
528
  fill_value=(x_min, x_max))
532
529
 
530
+ # Random generator
531
+ rng = numpy.random.default_rng(seed)
532
+
533
533
  # Draw from uniform distribution
534
534
  if method == 'mc':
535
- u = numpy.random.rand(size)
535
+ u = rng.random(size)
536
536
  elif method == 'qmc':
537
- engine = qmc.Halton(d=1)
537
+ engine = qmc.Halton(d=1, rng=rng)
538
538
  u = engine.random(size)
539
539
  else:
540
- raise ValueError('"method" is invalid.')
540
+ raise NotImplementedError('"method" is invalid.')
541
541
 
542
542
  # Draw from distribution by mapping from inverse CDF
543
543
  samples = inv_cdf(u).ravel()
@@ -587,14 +587,12 @@ class MarchenkoPastur(object):
587
587
  >>> A = mp.matrix(2000)
588
588
  """
589
589
 
590
- if seed is not None:
591
- numpy.random.seed(seed)
592
-
593
590
  # Parameters
594
591
  m = int(size / self.lam)
595
592
 
596
593
  # Generate random matrix X (n x m) with i.i.d. standard normal entries.
597
- X = numpy.random.randn(size, m)
594
+ rng = numpy.random.default_rng(seed)
595
+ X = rng.standard_normal((size, m))
598
596
 
599
597
  # Form the sample covariance matrix A = (1/m)*XX^T.
600
598
  A = X @ X.T / m
@@ -534,9 +534,6 @@ class Meixner(object):
534
534
  :class: custom-dark
535
535
  """
536
536
 
537
- if seed is not None:
538
- numpy.random.seed(seed)
539
-
540
537
  if x_min is None:
541
538
  x_min = self.lam_m
542
539
 
@@ -555,14 +552,17 @@ class Meixner(object):
555
552
  inv_cdf = interp1d(cdf, xs, bounds_error=False,
556
553
  fill_value=(x_min, x_max))
557
554
 
555
+ # Random generator
556
+ rng = numpy.random.default_rng(seed)
557
+
558
558
  # Draw from uniform distribution
559
559
  if method == 'mc':
560
- u = numpy.random.rand(size)
560
+ u = rng.random(size)
561
561
  elif method == 'qmc':
562
- engine = qmc.Halton(d=1)
562
+ engine = qmc.Halton(d=1, rng=rng)
563
563
  u = engine.random(size)
564
564
  else:
565
- raise ValueError('"method" is invalid.')
565
+ raise NotImplementedError('"method" is invalid.')
566
566
 
567
567
  # Draw from distribution by mapping from inverse CDF
568
568
  samples = inv_cdf(u).ravel()
@@ -509,9 +509,6 @@ class Wachter(object):
509
509
  :class: custom-dark
510
510
  """
511
511
 
512
- if seed is not None:
513
- numpy.random.seed(seed)
514
-
515
512
  if x_min is None:
516
513
  x_min = self.lam_m
517
514
 
@@ -530,14 +527,17 @@ class Wachter(object):
530
527
  inv_cdf = interp1d(cdf, xs, bounds_error=False,
531
528
  fill_value=(x_min, x_max))
532
529
 
530
+ # Random generator
531
+ rng = numpy.random.default_rng(seed)
532
+
533
533
  # Draw from uniform distribution
534
534
  if method == 'mc':
535
- u = numpy.random.rand(size)
535
+ u = rng.random(size)
536
536
  elif method == 'qmc':
537
- engine = qmc.Halton(d=1)
537
+ engine = qmc.Halton(d=1, rng=rng)
538
538
  u = engine.random(size)
539
539
  else:
540
- raise ValueError('"method" is invalid.')
540
+ raise NotImplementedError('"method" is invalid.')
541
541
 
542
542
  # Draw from distribution by mapping from inverse CDF
543
543
  samples = inv_cdf(u).ravel()
@@ -590,15 +590,13 @@ class Wachter(object):
590
590
  >>> A = wa.matrix(2000)
591
591
  """
592
592
 
593
- if seed is not None:
594
- numpy.random.seed(seed)
595
-
596
593
  n = size
597
594
  m1 = int(self.a * n)
598
595
  m2 = int(self.b * n)
599
596
 
600
- X = numpy.random.randn(n, m1)
601
- Y = numpy.random.randn(n, m2)
597
+ rng = numpy.random.default_rng(seed)
598
+ X = rng.standard_normal((n, m1))
599
+ Y = rng.standard_normal((n, m2))
602
600
 
603
601
  Sx = X @ X.T
604
602
  Sy = Y @ Y.T
@@ -486,9 +486,6 @@ class Wigner(object):
486
486
  :class: custom-dark
487
487
  """
488
488
 
489
- if seed is not None:
490
- numpy.random.seed(seed)
491
-
492
489
  if x_min is None:
493
490
  x_min = self.lam_m
494
491
 
@@ -507,14 +504,17 @@ class Wigner(object):
507
504
  inv_cdf = interp1d(cdf, xs, bounds_error=False,
508
505
  fill_value=(x_min, x_max))
509
506
 
507
+ # Random generator
508
+ rng = numpy.random.default_rng(seed)
509
+
510
510
  # Draw from uniform distribution
511
511
  if method == 'mc':
512
- u = numpy.random.rand(size)
512
+ u = rng.random(size)
513
513
  elif method == 'qmc':
514
- engine = qmc.Halton(d=1)
514
+ engine = qmc.Halton(d=1, rng=rng)
515
515
  u = engine.random(size)
516
516
  else:
517
- raise ValueError('"method" is invalid.')
517
+ raise NotImplementedError('"method" is invalid.')
518
518
 
519
519
  # Draw from distribution by mapping from inverse CDF
520
520
  samples = inv_cdf(u).ravel()
@@ -564,12 +564,11 @@ class Wigner(object):
564
564
  >>> A = wg.matrix(2000)
565
565
  """
566
566
 
567
- if seed is not None:
568
- numpy.random.seed(seed)
567
+ rng = numpy.random.default_rng(seed)
569
568
 
570
569
  # Parameters
571
570
  n = size
572
- X = numpy.random.randn(n, n)
571
+ X = rng.standard_normal(size=(n, n))
573
572
  X = (numpy.triu(X, 0) + numpy.triu(X, 1).T)
574
573
 
575
574
  return X * (self.r / (2.0 * numpy.sqrt(n)))