freealg 0.0.3__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
freealg/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.0.3"
1
+ __version__ = "0.1.1"
freealg/_chebyshev.py CHANGED
@@ -14,14 +14,15 @@
14
14
  import numpy
15
15
  from scipy.special import eval_chebyu
16
16
 
17
- __all__ = ['chebyshev_proj', 'chebyshev_approx', 'chebyshev_stieltjes']
17
+ __all__ = ['chebyshev_sample_proj', 'chebyshev_kernel_proj',
18
+ 'chebyshev_approx', 'chebyshev_stieltjes']
18
19
 
19
20
 
20
- # ==============
21
- # chebyshev proj
22
- # ==============
21
+ # =====================
22
+ # chebyshev sample proj
23
+ # =====================
23
24
 
24
- def chebyshev_proj(eig, support, K=10, reg=0.0):
25
+ def chebyshev_sample_proj(eig, support, K=10, reg=0.0):
25
26
  """
26
27
  Estimate the coefficients \\psi_k in
27
28
 
@@ -81,6 +82,39 @@ def chebyshev_proj(eig, support, K=10, reg=0.0):
81
82
  return psi
82
83
 
83
84
 
85
# =====================
# chebyshev kernel proj
# =====================

def chebyshev_kernel_proj(xs, pdf, support, K=10, reg=0.0):
    """
    Project a *continuous* density, given on a grid ``(xs, pdf)``, onto the
    Chebyshev polynomials of the second kind (Chebyshev-II basis).

    Parameters
    ----------
    xs : numpy.ndarray
        1-D grid on the original x-axis (not the mapped t-variable).
    pdf : numpy.ndarray
        Density values on ``xs`` (same shape); should integrate to 1 on
        ``xs``.
    support : tuple
        ``(lam_m, lam_p)``, the support interval of the density.
    K : int, default=10
        Highest polynomial degree; ``K + 1`` coefficients are returned.
    reg : float, default=0.0
        Regularization strength penalizing high-degree coefficients.

    Returns
    -------
    psi : numpy.ndarray
        Projection coefficients ``psi[0], ..., psi[K]``.
    """

    lam_m, lam_p = support

    # Affine map of the support [lam_m, lam_p] onto [-1, 1]
    t = (2.0 * xs - (lam_m + lam_p)) / (lam_p - lam_m)

    # numpy.trapz was removed in NumPy 2.0 in favor of numpy.trapezoid;
    # resolve whichever is available so both old and new NumPy work.
    try:
        trapezoid = numpy.trapezoid
    except AttributeError:
        trapezoid = numpy.trapz

    # Squared norm of U_k under the Chebyshev-II weight
    norm = numpy.pi / 2.0
    psi = numpy.empty(K + 1)

    for k in range(K + 1):
        Pk = eval_chebyu(k, t)                # U_k(t) on the grid
        moment = trapezoid(Pk * pdf, xs)      # \int U_k(t) \rho(x) dx

        if k == 0:
            penalty = 0
        else:
            # Quadratic penalty in the normalized degree; k = 0 is never
            # penalized so total mass is not damped.
            penalty = reg * (k / (K + 1))**2

        psi[k] = moment / (norm + penalty)

    return psi
116
+
117
+
84
118
  # ================
85
119
  # chebyshev approx
86
120
  # ================
freealg/_jacobi.py CHANGED
@@ -15,7 +15,8 @@ import numpy
15
15
  from scipy.special import eval_jacobi, roots_jacobi
16
16
  from scipy.special import gammaln, beta as Beta
17
17
 
18
- __all__ = ['jacobi_proj', 'jacobi_approx', 'jacobi_stieltjes']
18
+ __all__ = ['jacobi_sample_proj', 'jacobi_kernel_proj', 'jacobi_approx',
19
+ 'jacobi_stieltjes']
19
20
 
20
21
 
21
22
  # ==============
@@ -43,11 +44,11 @@ def jacobi_sq_norm(k, alpha, beta):
43
44
  return numpy.exp(lg_num - lg_den)
44
45
 
45
46
 
46
- # ===========
47
- # jacobi pro
48
- # ===========
47
+ # ==================
48
+ # jacobi sample proj
49
+ # ==================
49
50
 
50
- def jacobi_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
51
+ def jacobi_sample_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
51
52
  """
52
53
  """
53
54
 
@@ -76,6 +77,37 @@ def jacobi_proj(eig, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
76
77
  return psi
77
78
 
78
79
 
80
# ==================
# jacobi kernel proj
# ==================

def jacobi_kernel_proj(xs, pdf, support, K=10, alpha=0.0, beta=0.0, reg=0.0):
    """
    Same moments as `jacobi_sample_proj`, but the target is a *continuous*
    density given on a grid ``(xs, pdf)``.

    Parameters
    ----------
    xs : numpy.ndarray
        1-D grid on the original x-axis.
    pdf : numpy.ndarray
        Density values on ``xs`` (same shape); should integrate to 1.
    support : tuple
        ``(lam_m, lam_p)``, the support interval of the density.
    K : int, default=10
        Highest polynomial degree; ``K + 1`` coefficients are returned.
    alpha, beta : float, default=0.0
        Jacobi weight exponents.
    reg : float, default=0.0
        Regularization strength penalizing high-degree coefficients.

    Returns
    -------
    psi : numpy.ndarray
        Projection coefficients ``psi[0], ..., psi[K]``.
    """

    lam_m, lam_p = support
    t = (2.0 * xs - (lam_p + lam_m)) / (lam_p - lam_m)    # map to [-1, 1]
    psi = numpy.empty(K + 1)

    # numpy.trapz was removed in NumPy 2.0 in favor of numpy.trapezoid;
    # resolve whichever is available so both old and new NumPy work.
    try:
        trapezoid = numpy.trapezoid
    except AttributeError:
        trapezoid = numpy.trapz

    for k in range(K + 1):
        Pk = eval_jacobi(k, alpha, beta, t)
        N_k = jacobi_sq_norm(k, alpha, beta)

        # \int P_k(t) w(t) \rho(t) dt. w(t) cancels with pdf already being rho
        moment = trapezoid(Pk * pdf, xs)

        if k == 0:
            penalty = 0
        else:
            # Quadratic penalty in the normalized degree; k = 0 unpenalized.
            penalty = reg * (k / (K + 1))**2

        psi[k] = moment / (N_k + penalty)

    return psi
109
+
110
+
79
111
  # =============
80
112
  # jacobi approx
81
113
  # =============
freealg/_pade.py CHANGED
@@ -12,26 +12,374 @@
12
12
  # =======
13
13
 
14
14
  import numpy
15
+ from numpy.linalg import lstsq
15
16
  from itertools import product
16
17
  from scipy.optimize import least_squares, differential_evolution
17
18
 
18
19
  __all__ = ['fit_pade', 'eval_pade']
19
20
 
20
21
 
22
+ # =============
23
+ # default poles
24
+ # =============
25
+
26
+ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
27
+ """
28
+ Generate q real poles outside [lam_m, lam_p].
29
+
30
+ • even q : q/2 on each side (Chebyshev-like layout)
31
+ • odd q : (q+1)/2 on the *left*, (q–1)/2 on the right
32
+ so q=1 => single pole on whichever side `odd_side` says.
33
+
34
+ safety >= 1: 1, then poles start half an interval away; >1 pushes them
35
+ farther.
36
+ """
37
+
38
+ if q == 0:
39
+ return numpy.empty(0)
40
+
41
+ Delta = 0.5 * (lam_p - lam_m)
42
+
43
+ # Decide how many poles on each side. m_L and m_R determine how many poles
44
+ # to be on the left and right of the support interval.
45
+ if q % 2 == 0:
46
+ m_L = m_R = q // 2
47
+ else:
48
+ if odd_side == 'left':
49
+ m_L = (q + 1) // 2
50
+ m_R = q // 2
51
+ else:
52
+ m_L = q // 2
53
+ m_R = (q + 1) // 2
54
+
55
+ # Chebyshev-extrema offsets (all positive)
56
+ kL = numpy.arange(m_L)
57
+ tL = (2 * kL + 1) * numpy.pi / (2 * m_L)
58
+ offsL = safety * Delta * (1 + numpy.cos(tL))
59
+
60
+ kR = numpy.arange(m_R)
61
+ tR = (2 * kR + 1) * numpy.pi / (2 * m_R + (m_R == 0))
62
+ offsR = safety * Delta * (1 + numpy.cos(tR))
63
+
64
+ left = lam_m - offsL
65
+ right = lam_p + offsR
66
+
67
+ return numpy.sort(numpy.concatenate([left, right]))
68
+
69
+
70
+ # ============
71
+ # encode poles
72
+ # ============
73
+
74
+ def _encode_poles(a, lam_m, lam_p):
75
+ """
76
+ Map real pole a_j → unconstrained s_j,
77
+ so that the default left-of-interval pole stays left.
78
+ """
79
+
80
+ # half-width of the interval
81
+ d = 0.5 * (lam_p - lam_m)
82
+ # if a < lam_m, we want s ≥ 0; if a > lam_p, s < 0
83
+ return numpy.where(
84
+ a < lam_m,
85
+ numpy.log((lam_m - a) / d), # zero at a = lam_m - d
86
+ -numpy.log((a - lam_p) / d) # zero at a = lam_p + d
87
+ )
88
+
89
+
90
+ # ============
91
+ # decode poles
92
+ # ============
93
+
94
+ def _decode_poles(s, lam_m, lam_p):
95
+ """
96
+ Inverse map s_j → real pole a_j outside the interval.
97
+ """
98
+
99
+ d = 0.5 * (lam_p - lam_m)
100
+ return numpy.where(
101
+ s >= 0,
102
+ lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m−d (left)
103
+ lam_p + d * numpy.exp(-s) # maps s=0 to a=lam_p+d (right)
104
+ )
105
+
106
+
107
+ # ========
108
+ # inner ls
109
+ # ========
110
+
111
+ def _inner_ls(x, f, poles, p=1, pade_reg=0.0):
112
+ """
113
+ This is the inner least square (blazing fast).
114
+ """
115
+
116
+ if poles.size == 0 and p == -1:
117
+ return 0.0, 0.0, numpy.empty(0)
118
+
119
+ if poles.size == 0: # q = 0
120
+ # A = numpy.column_stack((numpy.ones_like(x), x))
121
+ cols = [numpy.ones_like(x)] if p >= 0 else []
122
+ if p == 1:
123
+ cols.append(x)
124
+ A = numpy.column_stack(cols)
125
+ # ---
126
+ theta, *_ = lstsq(A, f, rcond=None)
127
+ # c, D = theta # TEST
128
+ if p == -1:
129
+ c = 0.0
130
+ D = 0.0
131
+ resid = numpy.empty(0)
132
+ elif p == 0:
133
+ c = theta[0]
134
+ D = 0.0
135
+ resid = numpy.empty(0)
136
+ else: # p == 1
137
+ c, D = theta
138
+ resid = numpy.empty(0)
139
+ else:
140
+ # phi = 1.0 / (x[:, None] - poles[None, :])
141
+ # # A = numpy.column_stack((numpy.ones_like(x), x, phi)) # TEST
142
+ # # theta, *_ = lstsq(A, f, rcond=None)
143
+ # # c, D, resid = theta[0], theta[1], theta[2:]
144
+ # phi = 1.0 / (x[:, None] - poles[None, :])
145
+ # cols = [numpy.ones_like(x)] if p >= 0 else []
146
+ # if p == 1:
147
+ # cols.append(x)
148
+ # cols.append(phi)
149
+ # A = numpy.column_stack(cols)
150
+ # theta, *_ = lstsq(A, f, rcond=None)
151
+ # if p == -1:
152
+ # c = 0.0
153
+ # D = 0.0
154
+ # resid = theta
155
+ # elif p == 0:
156
+ # c = theta[0]
157
+ # D = 0.0
158
+ # resid = theta[1:]
159
+ # else: # p == 1
160
+ # c = theta[0]
161
+ # D = theta[1]
162
+ # resid = theta[2:]
163
+
164
+ phi = 1.0 / (x[:, None] - poles[None, :])
165
+ cols = [numpy.ones_like(x)] if p >= 0 else []
166
+ if p == 1:
167
+ cols.append(x)
168
+ cols.append(phi)
169
+
170
+ A = numpy.column_stack(cols)
171
+
172
+ # theta, *_ = lstsq(A, f, rcond=None) # TEST
173
+ if pade_reg > 0:
174
+ ATA = A.T.dot(A)
175
+
176
+ # # add pade_reg * I
177
+ # ATA.flat[:: ATA.shape[1]+1] += pade_reg
178
+ # ATf = A.T.dot(f)
179
+ # theta = numpy.linalg.solve(ATA, ATf)
180
+
181
+ # figure out how many elements to skip
182
+ if p == 1:
183
+ skip = 2 # skip c and D
184
+ elif p == 0:
185
+ skip = 1 # skip c only
186
+ else:
187
+ skip = 0 # all entries are residues
188
+
189
+ # add λ only for the residue positions
190
+ n = ATA.shape[0]
191
+ for i in range(skip, n):
192
+ ATA[i, i] += pade_reg
193
+
194
+ # then solve
195
+ ATf = A.T.dot(f)
196
+ theta = numpy.linalg.solve(ATA, ATf)
197
+
198
+ else:
199
+ theta, *_ = lstsq(A, f, rcond=None)
200
+
201
+ if p == -1:
202
+ c, D, resid = 0.0, 0.0, theta
203
+ elif p == 0:
204
+ c, D, resid = theta[0], 0.0, theta[1:]
205
+ else: # p == 1
206
+ c, D, resid = theta[0], theta[1], theta[2:]
207
+
208
+ return c, D, resid
209
+
210
+
211
+ # =============
212
+ # eval rational
213
+ # =============
214
+
215
+ def _eval_rational(z, c, D, poles, resid):
216
+ """
217
+ """
218
+
219
+ # z = z[:, None]
220
+ # if poles.size == 0:
221
+ # term = 0.0
222
+ # else:
223
+ # term = numpy.sum(resid / (z - poles), axis=1)
224
+ #
225
+ # return c + D * z.ravel() + term
226
+
227
+ # ensure z is a 1-D array
228
+ z = numpy.asarray(z)
229
+ z_col = z[:, None]
230
+
231
+ if poles.size == 0:
232
+ term = 0.0
233
+ else:
234
+ term = numpy.sum(resid / (z_col - poles[None, :]), axis=1)
235
+
236
+ return c + D * z + term
237
+
238
+
21
239
  # ========
22
240
  # fit pade
23
241
  # ========
24
242
 
25
- def fit_pade(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf, S=numpy.inf,
26
- B_default=10.0, S_factor=2.0, maxiter_de=200):
243
def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', pade_reg=0.0,
             safety=1.0, max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls',
             verbose=0):
    """
    Outer optimizer for a pole-residue rational fit.

    Optimizes the q pole locations (encoded as unconstrained variables via
    `_encode_poles`) while the linear coefficients (c, D, residues) are
    re-solved at every step by `_inner_ls`.

    Parameters
    ----------
    x, f : array_like
        Abscissae and target values to fit.
    lam_m, lam_p : float
        Support interval; poles are kept outside [lam_m, lam_p].
    p : {-1, 0, 1}, default=1
        Degree of the polynomial part passed to `_inner_ls`.
    q : int, default=2
        Number of poles.
    odd_side : {'left', 'right'}, default='left'
        Side receiving the extra pole when q is odd (see `_default_poles`).
    pade_reg : float, default=0.0
        Ridge regularization on the residues in the inner solve.
    safety : float, default=1.0
        Initial pole distance factor (see `_default_poles`).
    max_outer : int, default=40
        Maximum number of outer function evaluations (`max_nfev`).
    xtol, ftol : float, default=1e-12
        Convergence tolerances forwarded to `scipy.optimize.least_squares`.
    optimizer : {'ls', 'de'}, default='ls'
        'ls' runs least-squares only; 'de' runs a global
        differential-evolution stage followed by a local polish.
    verbose : int, default=0
        Verbosity level forwarded to `scipy.optimize.least_squares`.

    Returns
    -------
    pade_sol : dict
        Keys 'c', 'D', 'poles', 'resid', and 'outer_iters'.
    """

    # Checks
    if not (odd_side in ['left', 'right']):
        raise ValueError('"odd_side" can only be "left" or "right".')

    if not (p in [-1, 0, 1]):
        raise ValueError('"pade_p" can only be -1, 0, or 1.')

    x = numpy.asarray(x, float)
    f = numpy.asarray(f, float)

    poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)
    if q == 0 and p <= 0:
        # No poles to optimize: the fit is a single linear solve.
        # c, D, resid = _inner_ls(x, f, poles0, pade_reg=pade_reg)  # TEST
        c, D, resid = _inner_ls(x, f, poles0, p, pade_reg=pade_reg)
        pade_sol = {
            'c': c, 'D': D, 'poles': poles0, 'resid': resid,
            'outer_iters': 0
        }

        return pade_sol

    # Unconstrained encoding of the initial poles
    s0 = _encode_poles(poles0, lam_m, lam_p)

    # --------
    # residual
    # --------

    def residual(s, p=p):
        # Decode candidate poles, re-solve the linear coefficients, and
        # return the pointwise misfit. `p=p` binds the outer value early.
        poles = _decode_poles(s, lam_m, lam_p)
        # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg)  # TEST
        c, D, resid = _inner_ls(x, f, poles, p, pade_reg=pade_reg)
        return _eval_rational(x, c, D, poles, resid) - f

    # ----------------

    # Optimizer
    if optimizer == 'ls':
        # scale = numpy.maximum(1.0, numpy.abs(s0))
        res = least_squares(residual, s0,
                            method='trf',
                            # method='lm',
                            # x_scale=scale,
                            max_nfev=max_outer, xtol=xtol, ftol=ftol,
                            verbose=verbose)

    elif optimizer == 'de':

        # Bounds
        # span = lam_p - lam_m
        # B = 3.0  # multiples of span
        # L = numpy.log(B * span)
        # bounds = [(-L, L)] * len(s0)

        d = 0.5*(lam_p - lam_m)
        # the minimum factor so that lam_m - d*exp(s)=0 is exp(s)=lam_m/d
        # NOTE(review): when lam_m <= 0, min_factor is non-positive and B
        # silently falls back to 10.0 — confirm this bound heuristic is
        # intended for supports with lam_m > 0 only.
        min_factor = lam_m/d
        B = max(10.0, min_factor*10.0)
        L = numpy.log(B)
        bounds = [(-L, L)] * len(s0)

        # Global stage
        glob = differential_evolution(lambda s: numpy.sum(residual(s)**2),
                                      bounds, maxiter=50, popsize=10,
                                      polish=False)

        # local polish
        res = least_squares(
            residual, glob.x,
            method='lm',
            max_nfev=max_outer, xtol=xtol, ftol=ftol,
            verbose=verbose)

    else:
        raise RuntimeError('"optimizer" is invalid.')

    # Final decode and one last linear solve at the optimal poles
    poles = _decode_poles(res.x, lam_m, lam_p)
    # c, D, resid = _inner_ls(x, f, poles, pade_reg=pade_reg)  # TEST
    c, D, resid = _inner_ls(x, f, poles, p, pade_reg=pade_reg)

    pade_sol = {
        'c': c, 'D': D, 'poles': poles, 'resid': resid,
        'outer_iters': res.nfev
    }

    return pade_sol
335
+
336
+
337
# =========
# eval pade
# =========

def eval_pade(z, pade_sol):
    """
    Evaluate a fitted rational model at the points ``z``.

    Parameters
    ----------
    z : array_like
        Real or complex evaluation points, any shape.
    pade_sol : dict
        Fit returned by `fit_pade`, with keys 'c', 'D', 'poles' and
        'resid'.

    Returns
    -------
    numpy.ndarray or scalar
        ``c + D*z + sum_j resid_j / (z - pole_j)``, same shape as ``z``.
    """

    z = numpy.asanyarray(z)    # complex or real, any shape
    c, D = pade_sol['c'], pade_sol['D']
    poles, resid = pade_sol['poles'], pade_sol['resid']

    # Accumulate one pole at a time: each step is an elementwise op on z,
    # avoiding an N-by-q temporary.
    out = c + D*z
    for pole_j, resid_j in zip(poles, resid):
        out += resid_j / (z - pole_j)
    return out
367
+
368
+
369
+ # ============
370
+ # fit pade old
371
+ # ============
372
+
373
+ def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
374
+ S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
27
375
  """
28
376
  Fit a [p/q] rational P/Q of the form:
29
377
  P(x) = s * prod_{i=0..p-1}(x - a_i)
30
378
  Q(x) = prod_{j=0..q-1}(x - b_j)
31
379
 
32
380
  Constraints:
33
- a_i [lam_m, lam_p]
34
- b_j (-infty, lam_m - delta] cup [lam_p + delta, infty)
381
+ a_i in [lam_m, lam_p]
382
+ b_j in (-infty, lam_m - delta] cup [lam_p + delta, infty)
35
383
 
36
384
  Approach:
37
385
  - Brute‐force all 2^q left/right assignments for denominator roots
@@ -125,11 +473,11 @@ def fit_pade(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf, S=numpy.inf,
125
473
  }
126
474
 
127
475
 
128
- # =========
129
- # eval pade
130
- # =========
476
+ # =============
477
+ # eval pade old
478
+ # =============
131
479
 
132
- def eval_pade(z, s, a, b):
480
+ def eval_pade_old(z, s, a, b):
133
481
  """
134
482
  """
135
483
 
freealg/_plot_util.py CHANGED
@@ -23,34 +23,6 @@ __all__ = ['plot_fit', 'plot_density', 'plot_hilbert', 'plot_stieltjes',
23
23
  'plot_stieltjes_on_disk']
24
24
 
25
25
 
26
- # ==============
27
- # plot coeff fit
28
- # ==============
29
-
30
- def plot_coeff_fit(psi, latex=False, save=False):
31
- """
32
- """
33
-
34
- with texplot.theme(use_latex=latex):
35
-
36
- fig, ax = plt.subplots(figsize=(6, 2.7))
37
-
38
- # Save
39
- if save is False:
40
- save_status = False
41
- save_filename = ''
42
- else:
43
- save_status = True
44
- if isinstance(save, str):
45
- save_filename = save
46
- else:
47
- save_filename = 'energy.pdf'
48
-
49
- texplot.show_or_save_plot(plt, default_filename=save_filename,
50
- transparent_background=True, dpi=400,
51
- show_and_save=save_status, verbose=True)
52
-
53
-
54
26
  # ========
55
27
  # plot fit
56
28
  # ========
@@ -400,6 +372,12 @@ def plot_stieltjes_on_disk(r, t, m1_D, m2_D, support, latex=False, save=False):
400
372
  lam_m_z = (lam_m - 1j) / (lam_m + 1j)
401
373
  theta_p = numpy.angle(lam_p_z)
402
374
  theta_n = numpy.angle(lam_m_z)
375
+
376
+ if theta_n < 0:
377
+ theta_n += 2.0 * numpy.pi
378
+ if theta_p < 0:
379
+ theta_p += 2.0 * numpy.pi
380
+
403
381
  theta_branch = numpy.linspace(theta_n, theta_p, 100)
404
382
  theta_alt_branch = numpy.linspace(theta_p, theta_n + 2*numpy.pi, 100)
405
383
  r_branch = numpy.ones_like(theta_branch)
freealg/_sample.py ADDED
@@ -0,0 +1,85 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # SPDX-FileType: SOURCE
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify it under
5
+ # the terms of the license found in the LICENSE.txt file in the root directory
6
+ # of this source tree.
7
+
8
+
9
+ # =======
10
+ # Imports
11
+ # =======
12
+
13
+ from scipy.integrate import cumulative_trapezoid
14
+ from scipy.interpolate import interp1d
15
+ from scipy.stats import qmc
16
+
17
+ __all__ = ['qmc_sample']
18
+
19
+
20
+ # =============
21
+ # quantile func
22
+ # =============
23
+
24
+ def _quantile_func(x, rho):
25
+ """
26
+ Construct a quantile function from evaluations of an estimated density
27
+ on a grid (x, rho(x)).
28
+ """
29
+ cdf = cumulative_trapezoid(rho, x, initial=0)
30
+ cdf /= cdf[-1]
31
+ return interp1d(cdf, x, bounds_error=False, assume_sorted=True)
32
+
33
+
34
# ==========
# qmc sample
# ==========

def qmc_sample(x, rho, num_pts, seed=None):
    """
    Low-discrepancy sampling from a univariate density estimate using
    Quasi-Monte Carlo.

    Parameters
    ----------
    x : numpy.array, shape (n,)
        Sorted abscissae at which the density has been evaluated.

    rho : numpy.array, shape (n,)
        Density values corresponding to `x`. Must be non-negative and define
        a valid probability density (i.e., integrate to 1 over the support).

    num_pts : int
        Number of sample points to generate from the density estimate.

    seed : int, numpy.random.Generator, or None, default=None
        Seed for the scrambled Halton engine. Pass an integer for
        reproducible samples; with None the scrambling is randomized, which
        was the previous (default) behavior.

    Returns
    -------
    samples : numpy.array, shape (num_pts,)
        Samples drawn from the estimated density using a one-dimensional Halton
        sequence mapped through the estimated quantile function.

    See Also
    --------
    scipy.stats.qmc.Halton
        Underlying Quasi-Monte Carlo engine used for generating low-discrepancy
        points.

    Examples
    --------
    .. code-block:: python

        >>> import numpy
        >>> from freealg._sample import qmc_sample
        >>> x = numpy.linspace(0, 1, 200)
        >>> rho = 3 * x**2  # density of Beta(3,1) on [0,1]
        >>> samples = qmc_sample(x, rho, num_pts=1000)
        >>> assert samples.shape == (1000,)
        >>> # Empirical mean should be close to 3/4
        >>> numpy.allclose(samples.mean(), 0.75, atol=0.02)
    """

    # Inverse-transform sampling: push low-discrepancy uniforms through
    # the empirical quantile function.
    quantile = _quantile_func(x, rho)
    engine = qmc.Halton(d=1, seed=seed)
    u = engine.random(num_pts)
    samples = quantile(u)
    return samples.ravel()
@@ -7,10 +7,10 @@
7
7
  # directory of this source tree.
8
8
 
9
9
  from .marchenko_pastur import MarchenkoPastur
10
- # from .wigner import Wigner
11
- # from .kesten_mckay import KestenMcKay
12
- # from .wachter import Wachter
10
+ from .wigner import Wigner
11
+ from .kesten_mckay import KestenMcKay
12
+ from .wachter import Wachter
13
13
  # from .meixner import meixner
14
14
 
15
15
  # __all__ = ['MarchenkoPastur', 'Wigner', 'KestenMcKay', 'Wachter', 'Meixner']
16
- __all__ = ['MarchenkoPastur']
16
+ __all__ = ['MarchenkoPastur', 'Wigner', 'KestenMcKay', 'Wachter']