freealg 0.1.11__py3-none-any.whl → 0.7.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. freealg/__init__.py +8 -2
  2. freealg/__version__.py +1 -1
  3. freealg/_algebraic_form/__init__.py +12 -0
  4. freealg/_algebraic_form/_branch_points.py +288 -0
  5. freealg/_algebraic_form/_constraints.py +139 -0
  6. freealg/_algebraic_form/_continuation_algebraic.py +706 -0
  7. freealg/_algebraic_form/_decompress.py +641 -0
  8. freealg/_algebraic_form/_decompress2.py +204 -0
  9. freealg/_algebraic_form/_edge.py +330 -0
  10. freealg/_algebraic_form/_homotopy.py +323 -0
  11. freealg/_algebraic_form/_moments.py +448 -0
  12. freealg/_algebraic_form/_sheets_util.py +145 -0
  13. freealg/_algebraic_form/_support.py +309 -0
  14. freealg/_algebraic_form/algebraic_form.py +1232 -0
  15. freealg/_free_form/__init__.py +16 -0
  16. freealg/{_chebyshev.py → _free_form/_chebyshev.py} +75 -43
  17. freealg/_free_form/_decompress.py +993 -0
  18. freealg/_free_form/_density_util.py +243 -0
  19. freealg/_free_form/_jacobi.py +359 -0
  20. freealg/_free_form/_linalg.py +508 -0
  21. freealg/{_pade.py → _free_form/_pade.py} +42 -208
  22. freealg/{_plot_util.py → _free_form/_plot_util.py} +37 -22
  23. freealg/{_sample.py → _free_form/_sample.py} +58 -22
  24. freealg/_free_form/_series.py +454 -0
  25. freealg/_free_form/_support.py +214 -0
  26. freealg/_free_form/free_form.py +1362 -0
  27. freealg/_geometric_form/__init__.py +13 -0
  28. freealg/_geometric_form/_continuation_genus0.py +175 -0
  29. freealg/_geometric_form/_continuation_genus1.py +275 -0
  30. freealg/_geometric_form/_elliptic_functions.py +174 -0
  31. freealg/_geometric_form/_sphere_maps.py +63 -0
  32. freealg/_geometric_form/_torus_maps.py +118 -0
  33. freealg/_geometric_form/geometric_form.py +1094 -0
  34. freealg/_util.py +56 -110
  35. freealg/distributions/__init__.py +7 -1
  36. freealg/distributions/_chiral_block.py +494 -0
  37. freealg/distributions/_deformed_marchenko_pastur.py +726 -0
  38. freealg/distributions/_deformed_wigner.py +386 -0
  39. freealg/distributions/_kesten_mckay.py +29 -15
  40. freealg/distributions/_marchenko_pastur.py +224 -95
  41. freealg/distributions/_meixner.py +47 -37
  42. freealg/distributions/_wachter.py +29 -17
  43. freealg/distributions/_wigner.py +27 -14
  44. freealg/visualization/__init__.py +12 -0
  45. freealg/visualization/_glue_util.py +32 -0
  46. freealg/visualization/_rgb_hsv.py +125 -0
  47. freealg-0.7.12.dist-info/METADATA +172 -0
  48. freealg-0.7.12.dist-info/RECORD +53 -0
  49. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/WHEEL +1 -1
  50. freealg/_decompress.py +0 -180
  51. freealg/_jacobi.py +0 -218
  52. freealg/_support.py +0 -85
  53. freealg/freeform.py +0 -967
  54. freealg-0.1.11.dist-info/METADATA +0 -140
  55. freealg-0.1.11.dist-info/RECORD +0 -24
  56. /freealg/{_damp.py → _free_form/_damp.py} +0 -0
  57. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/licenses/AUTHORS.txt +0 -0
  58. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/licenses/LICENSE.txt +0 -0
  59. {freealg-0.1.11.dist-info → freealg-0.7.12.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,454 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # SPDX-FileType: SOURCE
+ #
+ # This program is free software: you can redistribute it and/or modify it under
+ # the terms of the license found in the LICENSE.txt file in the root directory
+ # of this source tree.
+
+
+ # =======
+ # Imports
+ # =======
+
+ import numpy
+
+ __all__ = ['partial_sum', 'wynn_epsilon', 'wynn_rho', 'levin_u',
+            'weniger_delta', 'brezinski_theta']
+
+
+ # ===========
+ # partial sum
+ # ===========
+
+ def partial_sum(coeffs, x, p=0.0):
+     """
+     Compute the partial sums:
+
+     .. math::
+
+         S_n(x) = \\sum_{k=0}^{n} a_k x^{k+p}, \\qquad n = 0, \\dots, N-1.
+
+     Parameters
+     ----------
+
+     coeffs : array_like
+         Coefficients [a_0, a_1, a_2, ..., a_{N-1}] of the power series of the
+         size N.
+
+     x : numpy.array
+         A flattened array of the size d.
+
+     p : float, default=0.0
+         Offset power.
+
+     Returns
+     -------
+
+     Sn : numpy.ndarray
+         Partial sums of the size (N, d), where the n-th row is the n-th
+         partial sum.
+     """
+
+     x_ = x.ravel()
+     N = len(coeffs)
+     d = x_.size
+
+     # Forming partial sums by accumulating successive powers of x
+     Sn = numpy.zeros((N, d), dtype=x.dtype)
+     sum_ = numpy.zeros((d,), dtype=x.dtype)
+     pow_x = numpy.ones((d,), dtype=x.dtype)
+
+     if p == 1:
+         pow_x *= x_
+     elif p != 0:
+         pow_x *= x_**p
+
+     for n in range(N):
+         sum_ += coeffs[n] * pow_x
+         Sn[n, :] = sum_
+
+         if n < N-1:
+             pow_x *= x_
+
+     return Sn
+
+
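A quick sanity check of partial_sum on the geometric series (illustration only; the import path assumes the module lands at freealg/_free_form/_series.py, per the file list above):

    import numpy
    from freealg._free_form._series import partial_sum

    # Geometric series: sum_k x^k = 1/(1 - x) for |x| < 1
    Sn = partial_sum(numpy.ones(30), numpy.array([0.2, 0.5, 0.9]))
    print(Sn.shape)   # (30, 3); row n holds the n-th partial sum
    print(Sn[-1])     # ~[1.25, 2.0, 9.58]; x = 0.9 converges slowly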
+ # ============
+ # wynn epsilon
+ # ============
+
+ def wynn_epsilon(Sn):
+     """
+     Accelerate convergence of a series using Wynn's epsilon algorithm.
+
+     Parameters
+     ----------
+
+     Sn : numpy.ndarray
+         A 2D array of the size (N, d), where N is the number of partial sums
+         and d is the vector size.
+
+     Returns
+     -------
+
+     S : numpy.array
+         A 1D array of the size (d,) which is the accelerated value of the
+         series at each vector element.
+
+     Notes
+     -----
+
+     Given a sequence of vectors:
+
+     .. math::
+
+         (S_n)_{n=1}^{N} = (S_1, \\dots, S_N)
+
+     this function finds the limit :math:`S = \\lim_{n \\to \\infty} S_n`.
+
+     Each :math:`S_i \\in \\mathbb{C}^d` is a vector. However, instead of
+     using the vector version of Wynn's epsilon algorithm, we apply the scalar
+     version to each component of the vector. The reason is that in our
+     dataset each component has its own convergence rate; the convergence rate
+     of the vector version is bounded by the worst component, which can
+     potentially stall convergence for all points. As such, the vector version
+     is avoided.
+
+     In our dataset, the series is in fact divergent. Wynn's accelerated
+     method then computes the principal value (antilimit) of the divergent
+     series.
+     """
+
+     # N: number of partial sums, d: vector size
+     N, d = Sn.shape
+
+     # Epsilons of stage k-1 and k-2
+     eps_prev = Sn.copy()    # row k-1
+     eps_pprev = None        # row k-2
+
+     tol = numpy.finfo(Sn.dtype).eps
+
+     # Wynn's epsilon triangle table
+     for k in range(1, N):
+         Nk = N - k
+
+         delta = eps_prev[1:Nk+1, :] - eps_prev[:Nk, :]
+         small = numpy.abs(delta) <= \
+             tol * numpy.maximum(1.0, numpy.abs(eps_prev[1:Nk+1, :]))
+
+         # Reciprocal of delta (zero where the denominator nearly vanishes;
+         # plain 0.0 is dtype-safe for both real and complex input)
+         rec_delta = numpy.empty_like(delta)
+         rec_delta[small] = 0.0
+         rec_delta[~small] = 1.0 / delta[~small]
+
+         # Current epsilon of row k
+         eps_curr = rec_delta
+         if k > 1:
+             eps_curr += eps_pprev[1:Nk+1, :]
+
+         # Roll rows
+         eps_pprev = eps_prev
+         eps_prev = eps_curr
+
+     # Only even rows approximate the limit; pick the last even row
+     if (N - 1) % 2 == 0:
+         # N is odd, so the last row k = N-1 is even
+         S = eps_prev[0, :]
+     else:
+         # N is even, so the even row is k = N-2
+         S = eps_pprev[0, :]
+
+     return S
+
+
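A minimal sketch of epsilon acceleration on the alternating harmonic series, whose partial sums approach ln 2 only like 1/n (same hypothetical import path as above):

    import numpy
    from freealg._free_form._series import partial_sum, wynn_epsilon

    coeffs = (-1.0)**numpy.arange(10) / numpy.arange(1, 11)
    Sn = partial_sum(coeffs, numpy.array([1.0]))
    print(Sn[-1])            # ~0.6456, still far from the limit
    print(wynn_epsilon(Sn))  # ~0.693147 = ln 2, from the same 10 sums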
+ # ========
+ # wynn rho
+ # ========
+
+ def wynn_rho(Sn, beta=0.0):
+     """
+     Accelerate convergence of a series using Wynn's rho algorithm.
+
+     Parameters
+     ----------
+
+     Sn : numpy.ndarray
+         A 2D array of shape ``(N, d)``, where *N* is the number of partial
+         sums and *d* is the vector size.
+
+     beta : float, default=0.0
+         Shift parameter in the rho recursion, usually chosen in the range
+         ``0 < beta <= 1``.
+
+     Returns
+     -------
+
+     S : numpy.ndarray
+         A 1D array of shape ``(d,)`` giving the rho-accelerated estimate
+         of the series limit for each component.
+
+     Notes
+     -----
+
+     Let ``S_n`` be the *n*-th partial sum of the (possibly divergent)
+     sequence. Wynn's rho algorithm builds a triangular table
+     ``rho[k, n]`` (row *k*, column *n*) as follows:
+
+         rho[-1, n] = 0
+         rho[ 0, n] = S_n
+
+         rho[k, n] = rho[k-2, n+1] +
+                     (n + beta + k - 1) / (rho[k-1, n+1] - rho[k-1, n])
+
+     Only even rows (k even) provide improved approximants. As with
+     ``wynn_epsilon``, we apply the scalar recursion component-wise so that a
+     slowly converging component does not stall the others.
+     """
+
+     # N: number of partial sums, d: vector size
+     N, d = Sn.shape
+
+     # Rho of stage k-1 and k-2
+     rho_prev = Sn.copy()    # row k-1
+     rho_pprev = None        # row k-2
+
+     tol = numpy.finfo(Sn.dtype).eps
+
+     # Wynn's rho triangle table
+     for k in range(1, N):
+         Nk = N - k
+
+         delta = rho_prev[1:Nk+1, :] - rho_prev[:Nk, :]
+         small = numpy.abs(delta) <= \
+             tol * numpy.maximum(1.0, numpy.abs(rho_prev[1:Nk+1, :]))
+
+         coef = (beta + (k - 1) + numpy.arange(Nk))[:, None]    # (Nk, 1)
+         coef = numpy.repeat(coef, d, axis=1)                   # (Nk, d)
+
+         # Current rho of row k (zero where the denominator nearly vanishes)
+         rho_curr = numpy.empty_like(delta)
+         rho_curr[small] = 0.0
+
+         if k == 1:
+             rho_curr[~small] = coef[~small] / delta[~small]
+         else:
+             rho_curr[~small] = rho_pprev[1:Nk+1][~small] + \
+                 coef[~small] / delta[~small]
+
+         # Roll rows
+         rho_pprev = rho_prev
+         rho_prev = rho_curr
+
+     # Only even rows approximate the limit; pick the last even row
+     if (N - 1) % 2 == 0:
+         # N is odd, so the last row k = N-1 is even
+         S = rho_prev[0, :]
+     else:
+         # N is even, so the even row is k = N-2
+         S = rho_pprev[0, :]
+
+     return S
+
+
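A usage sketch for wynn_rho on a logarithmically convergent series, the regime rho-type transforms are aimed at (hypothetical import path; the printed value targets pi^2/6 ~ 1.64493):

    import numpy
    from freealg._free_form._series import partial_sum, wynn_rho

    # S_n -> pi^2/6 with error ~ 1/n
    coeffs = 1.0 / numpy.arange(1, 13)**2
    Sn = partial_sum(coeffs, numpy.array([1.0]))
    print(Sn[-1])                  # ~1.5650; the raw sums crawl
    print(wynn_rho(Sn, beta=1.0))  # rho-accelerated estimate of pi^2/6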
+ # =======
+ # levin u
+ # =======
+
+ def levin_u(Sn, omega=None, beta=0.0):
+     """
+     Levin u-transform (vector form).
+
+     Parameters
+     ----------
+     Sn : ndarray, shape (N, d)
+         First N partial sums of a vector series.
+     omega : None or ndarray, shape (N-1, d), optional
+         Remainder estimates. If None, uses omega_n = S_{n+1} - S_n.
+     beta : float, optional
+         Levin shift parameter (default 0.0).
+
+     Returns
+     -------
+     S : ndarray, shape (d,)
+         Accelerated sum / antilimit.
+     """
+
+     Sn = numpy.asarray(Sn)
+     N, d = Sn.shape
+     if N < 3:
+         raise ValueError("Need at least 3 partial sums for Levin u.")
+
+     # Default remainder estimate omega_n: the forward difference
+     if omega is None:
+         omega = Sn[1:, :] - Sn[:-1, :]
+     else:
+         omega = numpy.asarray(omega)
+         if omega.shape != (N - 1, d):
+             raise ValueError("omega must have shape (N-1, d).")
+
+     tol = numpy.finfo(Sn.dtype).eps
+     m = N - 2    # highest usable order
+
+     # Binomial coefficients C(m, k) with alternating sign
+     Cmk = numpy.empty(m + 1, dtype=Sn.dtype)
+     Cmk[0] = 1.0
+     for k in range(1, m + 1):
+         Cmk[k] = Cmk[k - 1] * (m - k + 1) / k
+     Cmk *= (-1.0) ** numpy.arange(m + 1)
+
+     # Powers (k + beta)^(m-1)
+     if m == 1:
+         Pk = numpy.ones(m + 1, dtype=Sn.dtype)
+     else:
+         Pk = (numpy.arange(m + 1, dtype=Sn.dtype) + beta) ** (m - 1)
+
+     numer = numpy.zeros(d, dtype=Sn.dtype)
+     denom = numpy.zeros(d, dtype=Sn.dtype)
+
+     for k in range(m + 1):
+         w = omega[k, :]
+
+         # Safe reciprocal of the remainder estimate
+         inv_w = numpy.empty_like(w)
+         mask = numpy.abs(w) < tol
+         inv_w[mask] = 0.0
+         inv_w[~mask] = 1.0 / w[~mask]
+
+         coeff = Cmk[k] * Pk[k]
+         numer += coeff * Sn[k, :] * inv_w
+         denom += coeff * inv_w
+
+     return numer / denom
+
+
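A sketch of the transform on the alternating harmonic series, using the default remainder estimate omega_n = S_{n+1} - S_n (hypothetical import path):

    import numpy
    from freealg._free_form._series import partial_sum, levin_u

    coeffs = (-1.0)**numpy.arange(8) / numpy.arange(1, 9)
    Sn = partial_sum(coeffs, numpy.array([1.0]))
    print(levin_u(Sn))   # ~0.69315 = ln 2, from only 8 partial sums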
+ # =============
+ # weniger delta
+ # =============
+
+ def weniger_delta(Sn):
+     """
+     Weniger's nonlinear delta^2 sequence transformation.
+
+     Parameters
+     ----------
+
+     Sn : numpy.ndarray
+         Array of shape (N, d) containing the first N partial sums of the
+         series.
+
+     Returns
+     -------
+
+     S : numpy.ndarray
+         Array of shape (d,) giving the delta^2 accelerated limit estimate
+         for each component.
+     """
+
+     N, d = Sn.shape
+
+     # Need at least three partial sums to form delta^2
+     if N < 3:
+         return Sn[-1, :].copy()
+
+     # First and second forward differences
+     delta1 = Sn[1:] - Sn[:-1]          # shape (N-1, d)
+     delta2 = delta1[1:] - delta1[:-1]  # shape (N-2, d)
+
+     tol = numpy.finfo(Sn.real.dtype).eps
+
+     # Safe reciprocal of delta2
+     small = numpy.abs(delta2) <= tol * numpy.maximum(
+         1.0, numpy.abs(delta1[:-1]))
+
+     rec_delta2 = numpy.empty_like(delta2)
+     rec_delta2[small] = 0.0
+     rec_delta2[~small] = 1.0 / delta2[~small]
+
+     # Delta^2 sequence, length N-2
+     delta_sq = Sn[:-2] - (delta1[:-1] ** 2) * rec_delta2
+
+     # Return the last delta^2 term as the accelerated estimate
+     S = delta_sq[-1, :]
+
+     return S
+
+
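Because the last delta^2 term is an Aitken-type step, the transform is exact on geometric partial sums, which makes a compact check (hypothetical import path):

    import numpy
    from freealg._free_form._series import partial_sum, weniger_delta

    # Partial sums of sum_k x^k at x = 0.5; the limit is 1/(1 - x) = 2
    Sn = partial_sum(numpy.ones(10), numpy.array([0.5]))
    print(Sn[-1])             # 1.998046875
    print(weniger_delta(Sn))  # 2.0, exact for a geometric sequence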
+ # ===============
+ # brezinski theta
+ # ===============
+
+ def brezinski_theta(Sn):
+     """
+     Accelerate convergence of a series using Brezinski's theta algorithm.
+
+     Parameters
+     ----------
+
+     Sn : numpy.ndarray
+         A 2-D array of the size ``(N, d)``, where `N` is the number of
+         partial sums and `d` is the vector size.
+
+     Returns
+     -------
+
+     S : numpy.ndarray
+         A 1-D array of the size ``(d,)``. The theta-accelerated estimate of
+         the series limit in each vector component.
+     """
+
+     N, d = Sn.shape
+
+     theta_prev = Sn.copy()                      # row k-1
+     theta_pprev = numpy.zeros_like(theta_prev)  # row k-2
+
+     tol = numpy.finfo(Sn.dtype).eps
+     last_k = 0    # index of the last completed row
+
+     for k in range(1, N):
+         L_prev = theta_prev.shape[0]    # current row length
+
+         if k % 2 == 1:
+
+             # Odd row 2m+1
+             if L_prev < 2:
+                 break
+
+             delta = theta_prev[1:] - theta_prev[:-1]    # len = L_prev-1
+             theta_pp = theta_pprev[1:L_prev]            # len = L_prev-1
+
+             small = numpy.abs(delta) <= \
+                 tol * numpy.maximum(1.0, numpy.abs(theta_prev[1:]))
+
+             theta_curr = numpy.empty_like(delta)
+             theta_curr[small] = 0.0
+             theta_curr[~small] = theta_pp[~small] + 1.0 / delta[~small]
+
+         else:
+
+             # Even row 2m+2
+             if L_prev < 3:
+                 break
+
+             delta_even = theta_pprev[2:L_prev] - theta_pprev[1:L_prev-1]
+             delta_odd = theta_prev[1:L_prev-1] - theta_prev[:L_prev-2]
+             delta2_odd = (theta_prev[2:L_prev] - 2.0 * theta_prev[1:L_prev-1]
+                           + theta_prev[:L_prev-2])
+
+             small = numpy.abs(delta2_odd) <= tol * numpy.maximum(
+                 1.0, numpy.abs(theta_prev[1:L_prev-1]))
+
+             theta_curr = numpy.empty_like(delta_odd)
+             theta_curr[small] = theta_pprev[1:L_prev-1][small]
+             theta_curr[~small] = (
+                 theta_pprev[1:L_prev-1][~small] +
+                 (delta_even[~small] * delta_odd[~small]) /
+                 delta2_odd[~small])
+
+         # Roll rows
+         theta_pprev = theta_prev
+         theta_prev = theta_curr
+         last_k = k
+
+     # Only even rows approximate the limit. The theta table shrinks faster
+     # than one column per row, so the loop may break early; select the last
+     # completed even row rather than relying on the parity of N.
+     if last_k % 2 == 0:
+         S = theta_prev[0]
+     else:
+         S = theta_pprev[0]
+
+     return S
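The theta algorithm is called exactly like the epsilon and rho transforms above (hypothetical import path; note that roughly 3m+1 partial sums are needed to reach even row 2m, since the table loses three entries per pair of rows):

    import numpy
    from freealg._free_form._series import partial_sum, brezinski_theta

    coeffs = (-1.0)**numpy.arange(10) / numpy.arange(1, 11)
    Sn = partial_sum(coeffs, numpy.array([1.0]))
    print(brezinski_theta(Sn))   # ~0.6931 = ln 2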
@@ -0,0 +1,214 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # SPDX-FileType: SOURCE
+ #
+ # This program is free software: you can redistribute it and/or modify it under
+ # the terms of the license found in the LICENSE.txt file in the root directory
+ # of this source tree.
+
+
+ # =======
+ # Imports
+ # =======
+
+ import numpy
+ # import numba
+ from scipy.stats import gaussian_kde
+
+ __all__ = ['support_from_density', 'supp']
+
+
+ # ====================
+ # support from density
+ # ====================
+
+ # @numba.njit(numba.types.UniTuple(numba.types.int64, 2)(
+ #     numba.types.float64,
+ #     numba.types.float64[::1]
+ # ))
+ def support_from_density(dx, density):
29
+ """
30
+ Estimates the support from a collection of noisy observations of a
31
+ density over a grid of x-values with mesh spacing dx.
32
+ """
33
+
34
+ n = density.shape[0]
35
+ target = 1.0 / dx
36
+
37
+ # compute total_sum once
38
+ total_sum = 0.0
39
+ for t in range(n):
40
+ total_sum += density[t]
41
+
42
+ # set up our "best-so-far" trackers
43
+ large = 1e300
44
+ best_nonneg_sum = large
45
+ best_nonneg_idx = -1
46
+ best_nonpos_sum = -large
47
+ best_nonpos_idx = -1
48
+
49
+ # seed with first element (i.e. prefix_sum for k=1)
50
+ prefix_sum = density[0]
51
+ if prefix_sum >= 0.0:
52
+ best_nonneg_sum, best_nonneg_idx = prefix_sum, 1
53
+ else:
54
+ best_nonpos_sum, best_nonpos_idx = prefix_sum, 1
55
+
56
+ # sweep j from 2, ..., n-1, updating prefix_sum on the fly
57
+ optimal_i, optimal_j = 1, 2
58
+ minimal_cost = large
59
+
60
+ for j in range(2, n):
61
+ # extend prefix_sum to cover density[0]...density[j-1]
62
+ prefix_sum += density[j-1]
63
+
64
+ # cost for [0...i], [i...j]
65
+ diff_mid = prefix_sum - target
66
+ if diff_mid >= 0.0 and best_nonneg_sum <= diff_mid:
67
+ cost12 = diff_mid
68
+ i_cand = best_nonneg_idx
69
+ elif diff_mid < 0.0 and best_nonpos_sum >= diff_mid:
70
+ cost12 = -diff_mid
71
+ i_cand = best_nonpos_idx
72
+ else:
73
+ cost_using_nonpos = diff_mid - 2.0 * best_nonpos_sum
74
+ cost_using_nonneg = 2.0 * best_nonneg_sum - diff_mid
75
+ if cost_using_nonpos < cost_using_nonneg:
76
+ cost12, i_cand = cost_using_nonpos, best_nonpos_idx
77
+ else:
78
+ cost12, i_cand = cost_using_nonneg, best_nonneg_idx
79
+
80
+ # cost for [j...n]
81
+ cost3 = total_sum - prefix_sum
82
+ if cost3 < 0.0:
83
+ cost3 = -cost3
84
+
85
+ # total and maybe update best split
86
+ total_cost = cost12 + cost3
87
+ if total_cost < minimal_cost:
88
+ minimal_cost = total_cost
89
+ optimal_i, optimal_j = i_cand, j
90
+
91
+ # update our prefix-sum trackers
92
+ if prefix_sum >= 0.0:
93
+ if prefix_sum < best_nonneg_sum:
94
+ best_nonneg_sum, best_nonneg_idx = prefix_sum, j
95
+ else:
96
+ if prefix_sum > best_nonpos_sum:
97
+ best_nonpos_sum, best_nonpos_idx = prefix_sum, j
98
+
99
+ return optimal_i, optimal_j
100
+
101
+
102
+ # ====
103
+ # supp
104
+ # ====
105
+
106
+ def supp(eigs, method='asymp', k=None, p=0.001):
107
+ """
108
+ Estimates the support of the eigenvalue density.
109
+
110
+ Parameters
111
+ ----------
112
+
113
+ method : {``'range'``, ``'asymp'``, ``'jackknife'``, ``'regression'``, \
114
+ ``'interior'``, ``'interior_smooth'``}, default= ``'asymp'``
115
+ The method of support estimation:
116
+
117
+ * ``'range'``: no estimation; the support is the range of the
118
+ eigenvalues.
119
+ * ``'asymp'``: assume the relative error in the min/max estimator is
120
+ :math:`1/n`.
121
+ * ``'jackknife'``: estimates the support using Quenouille's [1]_
122
+ jackknife estimator. Fast and simple, more accurate than the range.
123
+ * ``'regression'``: estimates the support by performing a regression
124
+ under the assumption that the edge behavior is of square-root type.
125
+ Often most accurate.
126
+ * ``'interior'``: estimates a support assuming the range overestimates;
127
+ uses quantiles :math:`(p, 1-p)`.
128
+ * ``'interior_smooth'``: same as ``'interior'`` but using kernel
129
+ density estimation, from [2]_.
130
+
131
+ k : int, default = None
132
+ Number of extreme order statistics to use for ``method='regression'``.
133
+
134
+ p : float, default=0.001
135
+ The edges of the support of the distribution is detected by the
136
+ :math:`p`-quantile on the left and :math:`(1-p)`-quantile on the right
137
+ where ``method='interior'`` or ``method='interior_smooth'``.
138
+ This value should be between 0 and 1, ideally a small number close to
139
+ zero.
140
+
141
+ Returns
142
+ -------
143
+
144
+ lam_m : float
145
+ Lower end of support interval :math:`[\\lambda_{-}, \\lambda_{+}]`.
146
+
147
+ lam_p : float
148
+ Upper end of support interval :math:`[\\lambda_{-}, \\lambda_{+}]`.
149
+
150
+ See Also
151
+ --------
152
+
153
+ freealg.sample
154
+ freealg.kde
155
+
156
+ References
157
+ ----------
158
+
159
+ .. [1] Quenouille, M. H. (1949). Approximate tests of correlation in
160
+ time-series. In Mathematical Proceedings of the Cambridge
161
+ Philosophical Society (Vol. 45, No. 3, pp. 483-484). Cambridge
162
+ University Press.
163
+
164
+ .. [2] Cuevas, A., & Fraiman, R. (1997). A plug-in approach to support
165
+ estimation. The Annals of Statistics, 2300-2312.
166
+ """
167
+
168
+ if method == 'range':
169
+ lam_m = eigs.min()
170
+ lam_p = eigs.max()
171
+
172
+ elif method == 'asymp':
173
+ lam_m = eigs.min() - abs(eigs.min()) / len(eigs)
174
+ lam_p = eigs.max() + abs(eigs.max()) / len(eigs)
175
+
176
+ elif method == 'jackknife':
177
+ x, n = numpy.sort(eigs), len(eigs)
178
+ lam_m = x[0] - (n - 1)/n * (x[1] - x[0])
179
+ lam_p = x[-1] + (n - 1)/n * (x[-1] - x[-2])
180
+
181
+ elif method == 'regression':
182
+ x, n = numpy.sort(eigs), len(eigs)
183
+ if k is None:
184
+ k = int(round(n ** (2/3)))
185
+ k = max(5, min(k, n // 2))
186
+
187
+ # The theoretical cdf near the edge behaves like const*(x - a)^{3/2},
188
+ # so (i/n) ~ (x - a)^{3/2} -> x ~ a + const*(i/n)^{2/3}.
189
+ y = ((numpy.arange(1, k + 1) - 0.5) / n) ** (2 / 3)
190
+
191
+ # Left edge: regress x_{(i)} on y
192
+ _, lam_m = numpy.polyfit(y, x[:k], 1)
193
+
194
+ # Right edge: regress x_{(n-i+1)} on y
195
+ _, lam_p = numpy.polyfit(y, x[-k:][::-1], 1)
196
+
197
+ elif method == 'interior':
198
+ lam_m, lam_p = numpy.quantile(eigs, [p, 1-p])
199
+
200
+ elif method == 'interior_smooth':
201
+ kde = gaussian_kde(eigs)
202
+ xs = numpy.linspace(eigs.min(), eigs.max(), 1000)
203
+ fs = kde(xs)
204
+
205
+ cdf = numpy.cumsum(fs)
206
+ cdf /= cdf[-1]
207
+
208
+ lam_m = numpy.interp(p, cdf, xs)
209
+ lam_p = numpy.interp(1-p, cdf, xs)
210
+
211
+ else:
212
+ raise NotImplementedError("Unknown method")
213
+
214
+ return lam_m, lam_p
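Finally, a usage sketch for supp; with uniform samples the true support is known, so the estimators are easy to eyeball (hypothetical import path):

    import numpy
    from freealg._free_form._support import supp

    eigs = numpy.random.default_rng(0).uniform(-1.0, 1.0, size=10000)
    print(supp(eigs, method='range'))      # slightly inside (-1, 1)
    print(supp(eigs, method='jackknife'))  # pushed outward toward the edges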