freealg 0.0.2__tar.gz → 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. {freealg-0.0.2/freealg.egg-info → freealg-0.1.0}/PKG-INFO +2 -1
  2. freealg-0.1.0/freealg/__version__.py +1 -0
  3. freealg-0.1.0/freealg/_decompress.py +136 -0
  4. {freealg-0.0.2 → freealg-0.1.0}/freealg/_jacobi.py +0 -2
  5. freealg-0.1.0/freealg/_pade.py +463 -0
  6. {freealg-0.0.2 → freealg-0.1.0}/freealg/_plot_util.py +43 -53
  7. freealg-0.1.0/freealg/_sample.py +85 -0
  8. {freealg-0.0.2 → freealg-0.1.0}/freealg/distributions/__init__.py +4 -4
  9. freealg-0.1.0/freealg/distributions/kesten_mckay.py +559 -0
  10. {freealg-0.0.2 → freealg-0.1.0}/freealg/distributions/marchenko_pastur.py +4 -3
  11. freealg-0.1.0/freealg/distributions/wachter.py +568 -0
  12. freealg-0.1.0/freealg/distributions/wigner.py +552 -0
  13. {freealg-0.0.2 → freealg-0.1.0}/freealg/freeform.py +252 -52
  14. {freealg-0.0.2 → freealg-0.1.0/freealg.egg-info}/PKG-INFO +2 -1
  15. {freealg-0.0.2 → freealg-0.1.0}/freealg.egg-info/SOURCES.txt +6 -1
  16. {freealg-0.0.2 → freealg-0.1.0}/freealg.egg-info/requires.txt +1 -0
  17. {freealg-0.0.2 → freealg-0.1.0}/requirements.txt +2 -1
  18. freealg-0.0.2/freealg/__version__.py +0 -1
  19. freealg-0.0.2/freealg/_pade.py +0 -139
  20. {freealg-0.0.2 → freealg-0.1.0}/CHANGELOG.rst +0 -0
  21. {freealg-0.0.2 → freealg-0.1.0}/LICENSE.txt +0 -0
  22. {freealg-0.0.2 → freealg-0.1.0}/MANIFEST.in +0 -0
  23. {freealg-0.0.2 → freealg-0.1.0}/README.rst +0 -0
  24. {freealg-0.0.2 → freealg-0.1.0}/freealg/__init__.py +0 -0
  25. {freealg-0.0.2 → freealg-0.1.0}/freealg/_chebyshev.py +0 -0
  26. {freealg-0.0.2 → freealg-0.1.0}/freealg/_damp.py +0 -0
  27. {freealg-0.0.2 → freealg-0.1.0}/freealg/_util.py +0 -0
  28. {freealg-0.0.2 → freealg-0.1.0}/freealg.egg-info/dependency_links.txt +0 -0
  29. {freealg-0.0.2 → freealg-0.1.0}/freealg.egg-info/not-zip-safe +0 -0
  30. {freealg-0.0.2 → freealg-0.1.0}/freealg.egg-info/top_level.txt +0 -0
  31. {freealg-0.0.2 → freealg-0.1.0}/pyproject.toml +0 -0
  32. {freealg-0.0.2 → freealg-0.1.0}/setup.cfg +0 -0
  33. {freealg-0.0.2 → freealg-0.1.0}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: freealg
3
- Version: 0.0.2
3
+ Version: 0.1.0
4
4
  Summary: Free probability for large matrices
5
5
  Keywords: leaderboard bot chat
6
6
  Platform: Linux
@@ -29,6 +29,7 @@ Requires-Dist: scipy
29
29
  Requires-Dist: texplot
30
30
  Requires-Dist: matplotlib
31
31
  Requires-Dist: colorcet
32
+ Requires-Dist: networkx
32
33
  Provides-Extra: test
33
34
  Requires-Dist: tox; extra == "test"
34
35
  Requires-Dist: pytest-cov; extra == "test"
@@ -0,0 +1 @@
1
+ __version__ = "0.1.0"
@@ -0,0 +1,136 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # SPDX-FileType: SOURCE
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify it under
5
+ # the terms of the license found in the LICENSE.txt file in the root directory
6
+ # of this source tree.
7
+
8
+
9
+ # =======
10
+ # Imports
11
+ # =======
12
+
13
+ import numpy
14
+
15
+ __all__ = ['decompress']
16
+
17
+
18
+ # ==========
19
+ # decompress
20
+ # ==========
21
+
22
def decompress(matrix, size, x=None, delta=1e-4, iterations=500, step_size=0.1,
               tolerance=1e-4):
    """
    Free decompression of spectral density.

    Parameters
    ----------

    matrix : FreeForm
        The initial matrix to be decompressed

    size : int
        Size of the decompressed matrix.

    x : numpy.array, default=None
        Positions where density to be evaluated at. If `None`, an interval
        slightly larger than the support interval will be used.

    delta: float, default=1e-4
        Size of the perturbation into the upper half plane for Plemelj's
        formula.

    iterations: int, default=500
        Maximum number of Newton iterations.

    step_size: float, default=0.1
        Step size for Newton iterations.

    tolerance: float, default=1e-4
        Tolerance for the solution obtained by the Newton solver. Also
        used for the finite difference approximation to the derivative.

    Returns
    -------

    rho : numpy.array
        Spectral density evaluated at ``x``.

    x : numpy.array
        The evaluation grid actually used (either the input ``x`` or the
        auto-generated grid).

    support : tuple
        ``(lb, ub)``, the estimated lower and upper bound of the
        decompressed support.

    See Also
    --------

    density
    stieltjes

    Notes
    -----

    Work in progress.

    References
    ----------

    .. [1] tbd

    Examples
    --------

    .. code-block:: python

        >>> from freealg import FreeForm
    """

    # Ratio of target size to current size; alpha > 1 means decompression.
    alpha = size / matrix.n
    # NOTE(review): m(z) appears to return a tuple whose second element is
    # the Stieltjes transform value — confirm against FreeForm.
    m = matrix._eval_stieltjes
    # Lower and upper bound on new support
    hilb_lb = (1 / m(matrix.lam_m + delta * 1j)[1]).real
    hilb_ub = (1 / m(matrix.lam_p + delta * 1j)[1]).real
    lb = matrix.lam_m - (alpha - 1) * hilb_lb
    ub = matrix.lam_p - (alpha - 1) * hilb_ub

    # Create x if not given
    if x is None:
        # Grid spans the estimated support enlarged by 25%, snapped to
        # integer endpoints.
        radius = 0.5 * (ub - lb)
        center = 0.5 * (ub + lb)
        scale = 1.25
        x_min = numpy.floor(center - radius * scale)
        x_max = numpy.ceil(center + radius * scale)
        x = numpy.linspace(x_min, x_max, 500)

    def _char_z(z):
        # Characteristic map whose root (in z) corresponds to a target
        # point under the free-decompression flow.
        return z + (1 / m(z)[1]) * (1 - alpha)

    # Ensure that input is an array
    x = numpy.asarray(x)

    # Target points perturbed slightly into the upper half plane
    # (Plemelj's formula).
    target = x + delta * 1j

    # Common initial guess for all Newton iterations.
    z = numpy.full(target.shape, numpy.mean(matrix.support) - .1j,
                   dtype=numpy.complex128)

    # Broken Newton steps can produce a lot of warnings. Removing them
    # for now.
    with numpy.errstate(all='ignore'):
        for _ in range(iterations):
            objective = _char_z(z) - target
            # Only keep iterating on points that have not yet converged.
            mask = numpy.abs(objective) >= tolerance
            if not numpy.any(mask):
                break
            z_m = z[mask]

            # Perform finite difference approximation
            dfdz = _char_z(z_m+tolerance) - _char_z(z_m-tolerance)
            dfdz /= 2*tolerance
            # Guard against division by zero in the Newton update.
            dfdz[dfdz == 0] = 1.0

            # Perform Newton step
            z[mask] = z_m - step_size * objective[mask] / dfdz

    # Plemelj's formula
    char_s = m(z)[1] / alpha
    rho = numpy.maximum(0, char_s.imag / numpy.pi)
    # Non-converged points may yield nan/inf; report them as zero density.
    rho[numpy.isnan(rho) | numpy.isinf(rho)] = 0
    rho = rho.reshape(*x.shape)

    return rho, x, (lb, ub)
@@ -174,9 +174,7 @@ def jacobi_stieltjes(z, psi, support, alpha=0.0, beta=0.0, n_base=40):
174
174
  integrand = w_nodes * P_k_nodes # (n_quad,)
175
175
 
176
176
  # Broadcast over z: shape (n_quad, ...) / ...
177
- # diff = u_z[None, ...] - t_nodes[:, None] # (n_quad, ...)
178
177
  diff = u_z[None, ...] - t_nodes[:, None, None] # (n_quad, Ny, Nx)
179
- # m_k = (integrand[:, None] / diff).sum(axis=0) # shape like z
180
178
  m_k = (integrand[:, None, None] / diff).sum(axis=0)
181
179
 
182
180
  # Accumulate with factor 2/span
@@ -0,0 +1,463 @@
1
+ # SPDX-FileCopyrightText: Copyright 2025, Siavash Ameli <sameli@berkeley.edu>
2
+ # SPDX-License-Identifier: BSD-3-Clause
3
+ # SPDX-FileType: SOURCE
4
+ #
5
+ # This program is free software: you can redistribute it and/or modify it under
6
+ # the terms of the license found in the LICENSE.txt file in the root directory
7
+ # of this source tree.
8
+
9
+
10
+ # =======
11
+ # Imports
12
+ # =======
13
+
14
+ import numpy
15
+ from numpy.linalg import lstsq
16
+ from itertools import product
17
+ from scipy.optimize import least_squares, differential_evolution
18
+
19
+ __all__ = ['fit_pade', 'eval_pade']
20
+
21
+
22
+ # =============
23
+ # default poles
24
+ # =============
25
+
26
+ def _default_poles(q, lam_m, lam_p, safety=1.0, odd_side='left'):
27
+ """
28
+ Generate q real poles outside [lam_m, lam_p].
29
+
30
+ • even q : q/2 on each side (Chebyshev-like layout)
31
+ • odd q : (q+1)/2 on the *left*, (q–1)/2 on the right
32
+ so q=1 => single pole on whichever side `odd_side` says.
33
+
34
+ safety >= 1: 1, then poles start half an interval away; >1 pushes them
35
+ farther.
36
+ """
37
+
38
+ if q == 0:
39
+ return numpy.empty(0)
40
+
41
+ Delta = 0.5 * (lam_p - lam_m)
42
+
43
+ # Decide how many poles on each side. m_L and m_R determine how many poles
44
+ # to be on the left and right of the support interval.
45
+ if q % 2 == 0:
46
+ m_L = m_R = q // 2
47
+ else:
48
+ if odd_side == 'left':
49
+ m_L = (q + 1) // 2
50
+ m_R = q // 2
51
+ else:
52
+ m_L = q // 2
53
+ m_R = (q + 1) // 2
54
+
55
+ # Chebyshev-extrema offsets (all positive)
56
+ kL = numpy.arange(m_L)
57
+ tL = (2 * kL + 1) * numpy.pi / (2 * m_L)
58
+ offsL = safety * Delta * (1 + numpy.cos(tL))
59
+
60
+ kR = numpy.arange(m_R)
61
+ tR = (2 * kR + 1) * numpy.pi / (2 * m_R + (m_R == 0))
62
+ offsR = safety * Delta * (1 + numpy.cos(tR))
63
+
64
+ left = lam_m - offsL
65
+ right = lam_p + offsR
66
+
67
+ return numpy.sort(numpy.concatenate([left, right]))
68
+
69
+
70
+ # ============
71
+ # encode poles
72
+ # ============
73
+
74
+ def _encode_poles(a, lam_m, lam_p):
75
+ """
76
+ Map real pole a_j → unconstrained s_j,
77
+ so that the default left-of-interval pole stays left.
78
+ """
79
+
80
+ # half-width of the interval
81
+ d = 0.5 * (lam_p - lam_m)
82
+ # if a < lam_m, we want s ≥ 0; if a > lam_p, s < 0
83
+ return numpy.where(
84
+ a < lam_m,
85
+ numpy.log((lam_m - a) / d), # zero at a = lam_m - d
86
+ -numpy.log((a - lam_p) / d) # zero at a = lam_p + d
87
+ )
88
+
89
+
90
+ # ============
91
+ # decode poles
92
+ # ============
93
+
94
+ def _decode_poles(s, lam_m, lam_p):
95
+ """
96
+ Inverse map s_j → real pole a_j outside the interval.
97
+ """
98
+
99
+ d = 0.5 * (lam_p - lam_m)
100
+ return numpy.where(
101
+ s >= 0,
102
+ lam_m - d * numpy.exp(s), # maps s=0 to a=lam_m−d (left)
103
+ lam_p + d * numpy.exp(-s) # maps s=0 to a=lam_p+d (right)
104
+ )
105
+
106
+
107
+ # ========
108
+ # inner ls
109
+ # ========
110
+
111
+ # def _inner_ls(x, f, poles): # TEST
112
+ def _inner_ls(x, f, poles, p=1):
113
+ """
114
+ This is the inner least square (blazing fast).
115
+ """
116
+
117
+ if poles.size == 0 and p == -1:
118
+ return 0.0, 0.0, numpy.empty(0)
119
+
120
+ if poles.size == 0: # q = 0
121
+ # A = numpy.column_stack((numpy.ones_like(x), x))
122
+ cols = [numpy.ones_like(x)] if p >= 0 else []
123
+ if p == 1:
124
+ cols.append(x)
125
+ A = numpy.column_stack(cols)
126
+ # ---
127
+ theta, *_ = lstsq(A, f, rcond=None)
128
+ # c, D = theta # TEST
129
+ if p == -1:
130
+ c = 0.0
131
+ D = 0.0
132
+ resid = numpy.empty(0)
133
+ elif p == 0:
134
+ c = theta[0]
135
+ D = 0.0
136
+ resid = numpy.empty(0)
137
+ else: # p == 1
138
+ c, D = theta
139
+ resid = numpy.empty(0)
140
+ else:
141
+ # phi = 1.0 / (x[:, None] - poles[None, :])
142
+ # # A = numpy.column_stack((numpy.ones_like(x), x, phi)) # TEST
143
+ # # theta, *_ = lstsq(A, f, rcond=None)
144
+ # # c, D, resid = theta[0], theta[1], theta[2:]
145
+ # phi = 1.0 / (x[:, None] - poles[None, :])
146
+ # cols = [numpy.ones_like(x)] if p >= 0 else []
147
+ # if p == 1:
148
+ # cols.append(x)
149
+ # cols.append(phi)
150
+ # A = numpy.column_stack(cols)
151
+ # theta, *_ = lstsq(A, f, rcond=None)
152
+ # if p == -1:
153
+ # c = 0.0
154
+ # D = 0.0
155
+ # resid = theta
156
+ # elif p == 0:
157
+ # c = theta[0]
158
+ # D = 0.0
159
+ # resid = theta[1:]
160
+ # else: # p == 1
161
+ # c = theta[0]
162
+ # D = theta[1]
163
+ # resid = theta[2:]
164
+
165
+ phi = 1.0 / (x[:, None] - poles[None, :])
166
+ cols = [numpy.ones_like(x)] if p >= 0 else []
167
+ if p == 1:
168
+ cols.append(x)
169
+ cols.append(phi)
170
+
171
+ A = numpy.column_stack(cols)
172
+ theta, *_ = lstsq(A, f, rcond=None)
173
+
174
+ if p == -1:
175
+ c, D, resid = 0.0, 0.0, theta
176
+ elif p == 0:
177
+ c, D, resid = theta[0], 0.0, theta[1:]
178
+ else: # p == 1
179
+ c, D, resid = theta[0], theta[1], theta[2:]
180
+
181
+ return c, D, resid
182
+
183
+
184
+ # =============
185
+ # eval rational
186
+ # =============
187
+
188
+ def _eval_rational(z, c, D, poles, resid):
189
+ """
190
+ """
191
+
192
+ # z = z[:, None]
193
+ # if poles.size == 0:
194
+ # term = 0.0
195
+ # else:
196
+ # term = numpy.sum(resid / (z - poles), axis=1)
197
+ #
198
+ # return c + D * z.ravel() + term
199
+
200
+ # ensure z is a 1-D array
201
+ z = numpy.asarray(z)
202
+ z_col = z[:, None]
203
+
204
+ if poles.size == 0:
205
+ term = 0.0
206
+ else:
207
+ term = numpy.sum(resid / (z_col - poles[None, :]), axis=1)
208
+
209
+ return c + D * z + term
210
+
211
+
212
+ # ========
213
+ # fit pade
214
+ # ========
215
+
216
def fit_pade(x, f, lam_m, lam_p, p=1, q=2, odd_side='left', safety=1.0,
             max_outer=40, xtol=1e-12, ftol=1e-12, optimizer='ls', verbose=0):
    """
    Outer optimizer for the pole locations of a rational (Pade-like) fit.

    Fits

        G(x) = (c if p >= 0 else 0) + (D*x if p == 1 else 0)
               + sum_j r_j / (x - a_j)

    where the ``q`` poles ``a_j`` are constrained to lie outside
    ``[lam_m, lam_p]``. For any fixed pole set the linear coefficients
    ``(c, D, r)`` come from the inner least-squares solve ``_inner_ls``;
    the pole positions themselves are optimized in an unconstrained
    encoding (``_encode_poles`` / ``_decode_poles``).

    Parameters
    ----------
    x, f : array_like
        Sample abscissae and values to fit.
    lam_m, lam_p : float
        Support interval that poles must avoid.
    p : {-1, 0, 1}, default=1
        Polynomial part of the model (see above).
    q : int, default=2
        Number of poles.
    odd_side : {'left', 'right'}, default='left'
        Side that receives the extra pole when ``q`` is odd.
    safety : float, default=1.0
        Distance multiplier for the initial pole layout.
    max_outer : int, default=40
        Maximum number of outer function evaluations.
    xtol, ftol : float, default=1e-12
        Termination tolerances for the least-squares optimizer.
    optimizer : {'ls', 'de'}, default='ls'
        'ls' for local least-squares only; 'de' for a global
        differential-evolution stage followed by a local polish.
    verbose : int, default=0
        Verbosity level passed to ``scipy.optimize.least_squares``.

    Returns
    -------
    pade_sol : dict
        Keys ``'c'``, ``'D'``, ``'poles'``, ``'resid'``, ``'outer_iters'``.

    Raises
    ------
    ValueError
        If ``odd_side`` or ``p`` is invalid.
    RuntimeError
        If ``optimizer`` is invalid.
    """

    # Checks
    if not (odd_side in ['left', 'right']):
        raise ValueError('"odd_side" can only be "left" or "right".')

    if not (p in [-1, 0, 1]):
        raise ValueError('"pade_p" can only be -1, 0, or 1.')

    x = numpy.asarray(x, float)
    f = numpy.asarray(f, float)

    poles0 = _default_poles(q, lam_m, lam_p, safety=safety, odd_side=odd_side)

    # With no poles and no linear term there is nothing nonlinear to
    # optimize: a single inner linear solve is the full answer.
    if q == 0 and p <= 0:
        c, D, resid = _inner_ls(x, f, poles0, p)
        pade_sol = {
            'c': c, 'D': D, 'poles': poles0, 'resid': resid,
            'outer_iters': 0
        }

        return pade_sol

    s0 = _encode_poles(poles0, lam_m, lam_p)

    # --------
    # residual
    # --------

    def residual(s, p=p):
        # Residual of the full model at pole-encoding ``s``. The linear
        # coefficients are re-fit (cheaply) for every candidate pole set,
        # so the outer problem only sees the pole positions.
        poles = _decode_poles(s, lam_m, lam_p)
        c, D, resid = _inner_ls(x, f, poles, p)
        return _eval_rational(x, c, D, poles, resid) - f

    # ----------------

    # Optimizer
    if optimizer == 'ls':
        res = least_squares(residual, s0,
                            method='trf',
                            max_nfev=max_outer, xtol=xtol, ftol=ftol,
                            verbose=verbose)

    elif optimizer == 'de':

        # Box bounds for the encoded poles.
        d = 0.5*(lam_p - lam_m)
        # the minimum factor so that lam_m - d*exp(s)=0 is exp(s)=lam_m/d
        min_factor = lam_m/d
        B = max(10.0, min_factor*10.0)
        L = numpy.log(B)
        bounds = [(-L, L)] * len(s0)

        # Global stage
        glob = differential_evolution(lambda s: numpy.sum(residual(s)**2),
                                      bounds, maxiter=50, popsize=10,
                                      polish=False)

        # local polish
        res = least_squares(
            residual, glob.x,
            method='lm',
            max_nfev=max_outer, xtol=xtol, ftol=ftol,
            verbose=verbose)

    else:
        raise RuntimeError('"optimizer" is invalid.')

    poles = _decode_poles(res.x, lam_m, lam_p)
    c, D, resid = _inner_ls(x, f, poles, p)

    pade_sol = {
        'c': c, 'D': D, 'poles': poles, 'resid': resid,
        'outer_iters': res.nfev
    }

    return pade_sol
311
+
312
+
313
+ # =========
314
+ # eval pade
315
+ # =========
316
+
317
def eval_pade(z, pade_sol):
    """
    Evaluate a fitted Pade model (as returned by ``fit_pade``) at ``z``.

    Parameters
    ----------
    z : array_like
        Evaluation points, real or complex, of any shape.
    pade_sol : dict
        Solution dictionary with keys ``'c'``, ``'D'``, ``'poles'``,
        ``'resid'``.

    Returns
    -------
    numpy.ndarray
        ``c + D*z + sum_j resid_j / (z - poles_j)``, same shape as ``z``.
    """

    z = numpy.asanyarray(z)              # complex or real, any shape
    c, D = pade_sol['c'], pade_sol['D']
    poles, resid = pade_sol['poles'], pade_sol['resid']

    out = c + D*z
    # Accumulate one pole at a time: each step is an elementwise op on z,
    # avoiding an N-by-q temporary.
    for pole_j, resid_j in zip(poles, resid):
        out += resid_j / (z - pole_j)
    return out
343
+
344
+
345
+ # ============
346
+ # fit pade old
347
+ # ============
348
+
349
def fit_pade_old(x, f, lam_m, lam_p, p, q, delta=1e-8, B=numpy.inf,
                 S=numpy.inf, B_default=10.0, S_factor=2.0, maxiter_de=200):
    """
    Fit a [p/q] rational P/Q of the form:
        P(x) = s * prod_{i=0..p-1}(x - a_i)
        Q(x) = prod_{j=0..q-1}(x - b_j)

    Constraints:
        a_i in [lam_m, lam_p]
        b_j in (-infty, lam_m - delta] union [lam_p + delta, infty)

    Approach:
        - Brute-force all 2^q left/right assignments for denominator roots
        - Global search with differential_evolution, fallback to zeros if
          needed
        - Local refinement with least_squares

    Returns a dict with keys:
        's'     : optimal scale factor
        'a'     : array of p numerator roots (in [lam_m, lam_p])
        'b'     : array of q denominator roots (outside the interval)
        'resid' : final residual norm
        'signs' : tuple indicating left/right pattern for each b_j
    """

    # Determine finite bounds for DE
    if not numpy.isfinite(B):
        B_eff = B_default
    else:
        B_eff = B
    if not numpy.isfinite(S):
        # scale bound: S_factor * max|f| * interval width + safety
        S_eff = S_factor * numpy.max(numpy.abs(f)) * (lam_p - lam_m) + 1.0
        if S_eff <= 0:
            S_eff = 1.0
    else:
        S_eff = S

    def map_roots(signs, b):
        """Map unconstrained b_j -> real root outside the interval."""
        # exp(bj) > 0 guarantees the root lands strictly outside
        # [lam_m - delta, lam_p + delta] on the side chosen by the sign.
        out = numpy.empty_like(b)
        for j, (s_val, bj) in enumerate(zip(signs, b)):
            if s_val > 0:
                out[j] = lam_p + delta + numpy.exp(bj)
            else:
                out[j] = lam_m - delta - numpy.exp(bj)
        return out

    # Best candidate so far across all sign patterns; any finite residual
    # beats the initial sentinel.
    best = {'resid': numpy.inf}

    # Enumerate all left/right sign patterns
    for signs in product([-1, 1], repeat=q):
        # Residual vector for current pattern.
        # NOTE(review): resid_vec closes over the loop variable `signs`;
        # this is safe here because it is only called within the same
        # iteration, never stored for later.
        def resid_vec(z):
            s_val = z[0]
            a = z[1:1+p]
            b = z[1+p:]
            # Linearized residual P - f*Q instead of P/Q - f, which
            # avoids division by Q.
            P = s_val * numpy.prod(x[:, None] - a[None, :], axis=1)
            roots_Q = map_roots(signs, b)
            Q = numpy.prod(x[:, None] - roots_Q[None, :], axis=1)
            return P - f * Q

        def obj(z):
            # Scalar objective (sum of squares) for the global stage.
            r = resid_vec(z)
            return r.dot(r)

        # Build bounds for DE
        bounds = []
        bounds.append((-S_eff, S_eff))  # s
        bounds += [(lam_m, lam_p)] * p  # a_i
        bounds += [(-B_eff, B_eff)] * q  # b_j

        # 1) Global search
        try:
            de = differential_evolution(obj, bounds,
                                        maxiter=maxiter_de,
                                        polish=False)
            z0 = de.x
        except ValueError:
            # fallback: start at zeros
            z0 = numpy.zeros(1 + p + q)

        # 2) Local refinement
        ls = least_squares(resid_vec, z0, xtol=1e-12, ftol=1e-12)

        rnorm = numpy.linalg.norm(resid_vec(ls.x))
        if rnorm < best['resid']:
            best.update(resid=rnorm, signs=signs, x=ls.x.copy())

    # Unpack best solution
    z_best = best['x']
    s_opt = z_best[0]
    a_opt = z_best[1:1+p]
    b_opt = map_roots(best['signs'], z_best[1+p:])

    return {
        's': s_opt,
        'a': a_opt,
        'b': b_opt,
        'resid': best['resid'],
        'signs': best['signs'],
    }
450
+
451
+
452
+ # =============
453
+ # eval pade old
454
+ # =============
455
+
456
def eval_pade_old(z, s, a, b):
    """
    Evaluate the factored rational ``s * prod_i(z - a_i) / prod_j(z - b_j)``
    at ``z``, where ``a`` are numerator roots and ``b`` denominator roots.
    """

    numerator = s
    for root in a:
        numerator = numerator * (z - root)

    denominator = 1.0
    for root in b:
        denominator = denominator * (z - root)

    return numerator / denominator