freealg 0.6.3__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- freealg/__init__.py +8 -7
- freealg/__version__.py +1 -1
- freealg/_algebraic_form/__init__.py +11 -0
- freealg/_algebraic_form/_continuation_algebraic.py +503 -0
- freealg/_algebraic_form/_decompress.py +648 -0
- freealg/_algebraic_form/_edge.py +352 -0
- freealg/_algebraic_form/_sheets_util.py +145 -0
- freealg/_algebraic_form/algebraic_form.py +987 -0
- freealg/_freeform/__init__.py +16 -0
- freealg/_freeform/_density_util.py +243 -0
- freealg/{_linalg.py → _freeform/_linalg.py} +1 -1
- freealg/{freeform.py → _freeform/freeform.py} +2 -1
- freealg/_geometric_form/__init__.py +13 -0
- freealg/_geometric_form/_continuation_genus0.py +175 -0
- freealg/_geometric_form/_continuation_genus1.py +275 -0
- freealg/_geometric_form/_elliptic_functions.py +174 -0
- freealg/_geometric_form/_sphere_maps.py +63 -0
- freealg/_geometric_form/_torus_maps.py +118 -0
- freealg/_geometric_form/geometric_form.py +1094 -0
- freealg/_util.py +1 -228
- freealg/distributions/__init__.py +5 -1
- freealg/distributions/_chiral_block.py +440 -0
- freealg/distributions/_deformed_marchenko_pastur.py +617 -0
- freealg/distributions/_deformed_wigner.py +312 -0
- freealg/distributions/_kesten_mckay.py +2 -2
- freealg/distributions/_marchenko_pastur.py +199 -82
- freealg/distributions/_meixner.py +2 -2
- freealg/distributions/_wachter.py +2 -2
- freealg/distributions/_wigner.py +2 -2
- freealg/visualization/__init__.py +12 -0
- freealg/visualization/_glue_util.py +32 -0
- freealg/visualization/_rgb_hsv.py +125 -0
- {freealg-0.6.3.dist-info → freealg-0.7.1.dist-info}/METADATA +1 -1
- freealg-0.7.1.dist-info/RECORD +47 -0
- freealg-0.6.3.dist-info/RECORD +0 -26
- /freealg/{_chebyshev.py → _freeform/_chebyshev.py} +0 -0
- /freealg/{_damp.py → _freeform/_damp.py} +0 -0
- /freealg/{_decompress.py → _freeform/_decompress.py} +0 -0
- /freealg/{_jacobi.py → _freeform/_jacobi.py} +0 -0
- /freealg/{_pade.py → _freeform/_pade.py} +0 -0
- /freealg/{_plot_util.py → _freeform/_plot_util.py} +0 -0
- /freealg/{_sample.py → _freeform/_sample.py} +0 -0
- /freealg/{_series.py → _freeform/_series.py} +0 -0
- /freealg/{_support.py → _freeform/_support.py} +0 -0
- {freealg-0.6.3.dist-info → freealg-0.7.1.dist-info}/WHEEL +0 -0
- {freealg-0.6.3.dist-info → freealg-0.7.1.dist-info}/licenses/AUTHORS.txt +0 -0
- {freealg-0.6.3.dist-info → freealg-0.7.1.dist-info}/licenses/LICENSE.txt +0 -0
- {freealg-0.6.3.dist-info → freealg-0.7.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright 2026, Siavash Ameli <sameli@berkeley.edu>
|
|
2
|
+
# SPDX-License-Identifier: BSD-3-Clause
|
|
3
|
+
# SPDX-FileType: SOURCE
|
|
4
|
+
#
|
|
5
|
+
# This program is free software: you can redistribute it and/or modify it under
|
|
6
|
+
# the terms of the license found in the LICENSE.txt file in the root directory
|
|
7
|
+
# of this source tree.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# =======
|
|
11
|
+
# Imports
|
|
12
|
+
# =======
|
|
13
|
+
|
|
14
|
+
import numpy
|
|
15
|
+
from ._continuation_algebraic import eval_roots
|
|
16
|
+
from ._decompress import eval_P_partials
|
|
17
|
+
|
|
18
|
+
__all__ = ['evolve_edges', 'merge_edges']
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# ================
|
|
22
|
+
# edge newton step
|
|
23
|
+
# ================
|
|
24
|
+
|
|
25
|
+
def _edge_newton_step(t, zeta, y, a_coeffs, max_iter=30, tol=1e-12):
    """
    Run a damped Newton solve for the edge system at time t.

    Solves the 2x2 complex system in (zeta, y):

        F1 = P(zeta, y) = 0,
        F2 = y^2 * Py(zeta, y) - c * Pz(zeta, y) = 0,   c = exp(t) - 1,

    where P, Pz, Py come from ``eval_P_partials``. The Jacobian is
    approximated by forward differences along the real direction
    (sufficient for the analytic P — TODO confirm P is analytic in both
    arguments).

    Parameters
    ----------
    t : float
        Time parameter; enters only through c = exp(t) - 1.
    zeta, y : complex
        Initial guess for the spectral point.
    a_coeffs : ndarray
        Coefficients defining P(zeta, y), forwarded to eval_P_partials.
    max_iter : int, optional
        Maximum number of Newton iterations.
    tol : float, optional
        Convergence tolerance on max(|F1|, |F2|).

    Returns
    -------
    zeta, y : complex
        Final iterate (whether or not converged).
    ok : bool
        True if the residual dropped below tol; False after exhausting
        max_iter or on a singular Jacobian.
    """

    tau = float(numpy.exp(t))
    c = tau - 1.0

    for _ in range(max_iter):
        P, Pz, Py = eval_P_partials(zeta, y, a_coeffs)

        # F1 = P(zeta,y)
        F1 = complex(P)

        # F2 = y^2 Py - c Pz
        F2 = complex((y * y) * Py - c * Pz)

        if max(abs(F1), abs(F2)) <= tol:
            return zeta, y, True

        # Numerical Jacobian (2x2) in (zeta,y)
        # Step sizes scale with the iterate magnitude to keep the forward
        # difference well-conditioned for both small and large values.
        eps_z = 1e-8 * (1.0 + abs(zeta))
        eps_y = 1e-8 * (1.0 + abs(y))

        Pp, Pzp, Pyp = eval_P_partials(zeta + eps_z, y, a_coeffs)
        F1_zp = (complex(Pp) - F1) / eps_z
        F2_zp = (complex((y * y) * Pyp - c * Pzp) - F2) / eps_z

        Pp, Pzp, Pyp = eval_P_partials(zeta, y + eps_y, a_coeffs)
        F1_yp = (complex(Pp) - F1) / eps_y
        # Note: the y-step also perturbs the explicit y^2 factor of F2.
        F2_yp = (complex(((y + eps_y) * (y + eps_y)) * Pyp - c * Pzp) - F2) / \
            eps_y

        # Solve J * [dz, dy] = -F
        det = F1_zp * F2_yp - F1_yp * F2_zp
        if det == 0.0:
            # Singular Jacobian (e.g. near an edge merge): give up here.
            return zeta, y, False

        dz = (-F1 * F2_yp + F1_yp * F2) / det
        dy = (-F1_zp * F2 + F1 * F2_zp) / det

        # Mild damping if update is huge
        lam = 1.0
        if abs(dz) + abs(dy) > 10.0 * (1.0 + abs(zeta) + abs(y)):
            lam = 0.2

        zeta = zeta + lam * dz
        y = y + lam * dy

    return zeta, y, False
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# ==================
|
|
77
|
+
# pick physical root
|
|
78
|
+
# ==================
|
|
79
|
+
|
|
80
|
+
def _pick_physical_root(z, roots):
|
|
81
|
+
"""
|
|
82
|
+
Pick the Herglotz/physical root at a point z in C+.
|
|
83
|
+
|
|
84
|
+
Heuristic: choose the root with maximal Im(root) when Im(z)>0,
|
|
85
|
+
then enforce Im(root)>0. Falls back to closest-to -1/z if needed.
|
|
86
|
+
"""
|
|
87
|
+
|
|
88
|
+
r = numpy.asarray(roots, dtype=complex).ravel()
|
|
89
|
+
if r.size == 0:
|
|
90
|
+
return numpy.nan + 1j * numpy.nan
|
|
91
|
+
|
|
92
|
+
if z.imag > 0.0:
|
|
93
|
+
pos = r[numpy.imag(r) > 0.0]
|
|
94
|
+
if pos.size > 0:
|
|
95
|
+
return pos[numpy.argmax(numpy.imag(pos))]
|
|
96
|
+
|
|
97
|
+
target = -1.0 / z
|
|
98
|
+
return r[numpy.argmin(numpy.abs(r - target))]
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
# ============================
|
|
102
|
+
# init edge point from support
|
|
103
|
+
# ============================
|
|
104
|
+
|
|
105
|
+
def _init_edge_point_from_support(x_edge, a_coeffs, eta=1e-3):
    """
    Build the initial spectral point (zeta, y) at t=0 for an edge near
    x_edge.

    Evaluates the roots at z = x_edge + i*eta, selects the physical
    (Herglotz) branch as the initial y, then Newton-refines (zeta, y) on
    the real axis so the branch-point conditions hold at t=0.
    """

    probe = complex(x_edge + 1j * eta)
    all_roots = eval_roots(numpy.array([probe]), a_coeffs)[0]
    y_init = _pick_physical_root(probe, all_roots)

    # Start zeta on the real axis at the endpoint itself. At t=0 the
    # Newton system has c=0, so F2 reduces to y^2 * Py and the solve
    # targets the branch point (P=0, Py=0).
    return _edge_newton_step(0.0, complex(x_edge), y_init, a_coeffs,
                             max_iter=50, tol=1e-10)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
# ============
|
|
129
|
+
# evolve edges
|
|
130
|
+
# ============
|
|
131
|
+
|
|
132
|
+
def evolve_edges(t_grid, a_coeffs, support=None, eta=1e-3, dt_max=0.1,
                 max_iter=30, tol=1e-12):
    """
    Evolve spectral edges under free decompression using the fitted
    polynomial P.

    At time t, edges are computed as critical values of the FD map restricted
    to the spectral curve P(zeta,y)=0. We solve for (zeta(t), y(t)):

        P(zeta,y) = 0,
        y^2 * Py(zeta,y) - (exp(t)-1) * Pzeta(zeta,y) = 0,

    then map to the physical coordinate:
        z_edge(t) = zeta - (exp(t)-1)/y.

    Parameters
    ----------
    t_grid : array_like of float
        Strictly increasing time grid.
        NOTE(review): the first row of the output assumes z_edge = zeta,
        i.e. t_grid[0] == 0 — confirm against callers.
    a_coeffs : ndarray
        Coefficients defining P(zeta,y).
    support : list of (float, float), optional
        List of intervals [(a1,b1),...,(ak,bk)] at t=0. If provided, these
        endpoints are used as labels/initial guesses and all are tracked.
        If omitted, this function currently raises ValueError (auto-detection
        is intentionally not implemented here to avoid fragile heuristics).
    eta : float, optional
        Small imaginary part used only to pick an initial physical root near
        each endpoint at t=0.
    dt_max : float, optional
        Maximum internal time step used for substepping in t.
    max_iter : int, optional
        Newton iterations per time step.
    tol : float, optional
        Tolerance for the 2x2 Newton solve.

    Returns
    -------
    edges : ndarray, shape (len(t_grid), 2*k)
        Tracked edges in the order [a1,b1,a2,b2,...] for each time.
    ok : ndarray of bool, same shape as edges
        Flags indicating whether each edge solve succeeded.

    Notes
    -----
    The solve is done by continuation in time. If two edges merge, the Newton
    system may become ill-conditioned near the merge time.

    Examples
    --------
    .. code-block:: python

        t_grid = numpy.linspace(0.0, 3.0, 61)
        support = [(a1,b1)]
        edges, ok = evolve_edges(t_grid, a_coeffs, support=support,
                                 eta=1e-3)

        a_t = edges[:, 0]
        b_t = edges[:, 1]
    """

    t_grid = numpy.asarray(t_grid, dtype=float).ravel()
    if t_grid.size < 1:
        raise ValueError("t_grid must be non-empty.")
    if numpy.any(numpy.diff(t_grid) <= 0.0):
        raise ValueError("t_grid must be strictly increasing.")

    if support is None:
        raise ValueError(
            "support must be provided (auto-detection not implemented).")

    # Flatten endpoints in the order [a1,b1,a2,b2,...]
    endpoints0 = []
    for a, b in support:
        endpoints0.append(float(a))
        endpoints0.append(float(b))

    m = len(endpoints0)
    edges = numpy.empty((t_grid.size, m), dtype=float)
    ok = numpy.zeros((t_grid.size, m), dtype=bool)

    # Initialize spectral points (zeta,y) at t=0 for each endpoint
    zeta = numpy.empty(m, dtype=complex)
    y = numpy.empty(m, dtype=complex)

    for j in range(m):
        z0, y0, ok0 = _init_edge_point_from_support(endpoints0[j], a_coeffs,
                                                    eta=eta)
        zeta[j] = z0
        y[j] = y0
        edges[0, j] = float(numpy.real(z0))  # at t=0, z_edge = zeta
        ok[0, j] = ok0

    # Time continuation
    for it in range(1, t_grid.size):
        t0 = float(t_grid[it - 1])
        t1 = float(t_grid[it])
        dt = t1 - t0

        # Substep so no internal Newton continuation step exceeds dt_max.
        n_sub = int(numpy.ceil(dt / float(dt_max)))
        if n_sub < 1:
            n_sub = 1

        for ks in range(1, n_sub + 1):
            t = t0 + dt * (ks / float(n_sub))
            for j in range(m):
                zeta[j], y[j], okj = _edge_newton_step(
                    t, zeta[j], y[j], a_coeffs,
                    max_iter=max_iter, tol=tol
                )
                ok[it, j] = okj

        # Map the spectral point to the physical edge coordinate at t1.
        tau = float(numpy.exp(t1))
        c = tau - 1.0
        z_edge = zeta - c / y

        edges[it, :] = numpy.real(z_edge)
        # ok[it,:] already set in last substep loop

    return edges, ok
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
# ===========
|
|
255
|
+
# merge edges
|
|
256
|
+
# ===========
|
|
257
|
+
|
|
258
|
+
def merge_edges(edges, tol=0.0):
    """
    Merge bulks whose inner edges cross, keeping every column in place.

    Columns are fixed as [a1,b1,a2,b2,...,ak,bk]. Whenever the gap between
    bulk j and bulk j+1 closes (b_j >= a_{j+1} - tol), the two inner edges
    b_j and a_{j+1} are replaced by NaN; no column is ever shifted. This
    keeps each edge trajectory in a fixed column for smooth plotting, while
    the number of active bulks is the count of connected components left
    after all merges.

    Parameters
    ----------
    edges : ndarray, shape (nt, 2k)
        Edge trajectories [a1,b1,a2,b2,...].
    tol : float
        Merge tolerance in x-units.

    Returns
    -------
    edges2 : ndarray, shape (nt, 2k)
        Same shape as input; merged inner edges are NaN, no columns
        shifted.
    active_k : ndarray, shape (nt,)
        Number of remaining bulks (connected components) at each time.
    """
    edges = numpy.asarray(edges, dtype=float)
    n_times, n_cols = edges.shape
    if n_cols % 2 != 0:
        raise ValueError("edges must have even number of columns.")
    n_bulks = n_cols // 2

    out = edges.copy()
    active_k = numpy.zeros(n_times, dtype=int)

    for it in range(n_times):
        lefts = out[it, 0::2].copy()
        rights = out[it, 1::2].copy()

        # Each block is [lo, hi]: bulk indices of its outer left/right edge.
        blocks = [[j, j] for j in range(n_bulks)
                  if numpy.isfinite(lefts[j]) and numpy.isfinite(rights[j])
                  and rights[j] > lefts[j]]

        if not blocks:
            active_k[it] = 0
            continue

        # Sweep repeatedly, fusing adjacent blocks whose gap has closed.
        changed = True
        while changed and len(blocks) > 1:
            changed = False
            kept = [blocks[0]]
            for blk in blocks[1:]:
                prev = kept[-1]
                r_prev = rights[prev[1]]
                l_blk = lefts[blk[0]]
                if numpy.isfinite(r_prev) and numpy.isfinite(l_blk) and \
                        r_prev >= l_blk - float(tol):
                    # Annihilate the two inner edges in their own columns.
                    rights[prev[1]] = numpy.nan
                    lefts[blk[0]] = numpy.nan
                    # Absorb blk into prev; prev's right edge moves out.
                    prev[1] = blk[1]
                    changed = True
                else:
                    kept.append(blk)
            blocks = kept

        active_k[it] = len(blocks)

        # Write back in place; column order is untouched.
        out[it, 0::2] = lefts
        out[it, 1::2] = rights

    return out, active_k
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright 2026, Siavash Ameli <sameli@berkeley.edu>
|
|
2
|
+
# SPDX-License-Identifier: BSD-3-Clause
|
|
3
|
+
# SPDX-FileType: SOURCE
|
|
4
|
+
#
|
|
5
|
+
# This program is free software: you can redistribute it and/or modify it
|
|
6
|
+
# under the terms of the license found in the LICENSE.txt file in the root
|
|
7
|
+
# directory of this source tree.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# =======
|
|
11
|
+
# Imports
|
|
12
|
+
# =======
|
|
13
|
+
|
|
14
|
+
import numpy
|
|
15
|
+
|
|
16
|
+
__all__ = ['_pick_physical_root_scalar', 'track_roots_on_grid',
|
|
17
|
+
'infer_m1_partners_on_cuts']
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# =========================
|
|
21
|
+
# pick physical root scalar
|
|
22
|
+
# =========================
|
|
23
|
+
|
|
24
|
+
def _pick_physical_root_scalar(z, roots):
|
|
25
|
+
"""
|
|
26
|
+
Pick the Herglotz root: Im(root) has the same sign as Im(z).
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
s = 1.0 if (z.imag >= 0.0) else -1.0
|
|
30
|
+
k = int(numpy.argmax(s * roots.imag))
|
|
31
|
+
return roots[k]
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# ============
|
|
35
|
+
# permutations
|
|
36
|
+
# ============
|
|
37
|
+
|
|
38
|
+
def _permutations(items):
|
|
39
|
+
|
|
40
|
+
items = list(items)
|
|
41
|
+
if len(items) <= 1:
|
|
42
|
+
yield tuple(items)
|
|
43
|
+
return
|
|
44
|
+
for i in range(len(items)):
|
|
45
|
+
rest = items[:i] + items[i + 1:]
|
|
46
|
+
for p in _permutations(rest):
|
|
47
|
+
yield (items[i],) + p
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# ===================
|
|
51
|
+
# track roots on grid
|
|
52
|
+
# ===================
|
|
53
|
+
|
|
54
|
+
def track_roots_on_grid(m_all, z=None, i0=0, j0=0):
    """
    Sort the roots at every grid point into continuous sheets.

    Starting from a seed cell (i0, j0) whose roots are ordered by
    decreasing imaginary part, each subsequent cell's roots are permuted
    to minimize the total distance to an already-tracked neighbor (the
    cell to the left, else the cell above). Cells with non-finite roots
    are left as NaN.

    Parameters
    ----------
    m_all : ndarray, shape (n_y, n_x, s)
        Roots at each grid point; s roots per point.
    z : ndarray of shape (n_y, n_x), optional
        Grid of complex points. When given, the sheet with the largest
        mean imaginary part over Im(z) > 0 is moved to index 0
        (presumably the physical/Herglotz sheet — TODO confirm).
    i0, j0 : int, optional
        Seed cell used to anchor the sheet ordering.

    Returns
    -------
    sheets : ndarray, shape (n_y, n_x, s)
        Tracked sheets; NaN where the input roots were not finite.

    Notes
    -----
    All s! permutations are enumerated per cell, so this is intended for
    small s only.
    """

    m_all = numpy.asarray(m_all, dtype=numpy.complex128)
    n_y, n_x, s = m_all.shape

    # NaN marks cells that could not be tracked.
    sheets = numpy.full_like(m_all, numpy.nan + 1j * numpy.nan)

    # All s! candidate orderings, enumerated once up front.
    perms = numpy.array(list(_permutations(range(s))), dtype=int)

    def sort_seed(v):
        # Anchor ordering: decreasing imaginary part.
        v = numpy.asarray(v, dtype=numpy.complex128)
        order = numpy.argsort(-numpy.imag(v))
        return v[order]

    v0 = m_all[i0, j0, :]
    if numpy.all(numpy.isfinite(v0)):
        sheets[i0, j0, :] = sort_seed(v0)

    for i in range(i0, n_y):
        for j in range((j0 if i == i0 else 0), n_x):
            if i == i0 and j == j0:
                continue

            v = m_all[i, j, :]
            if not numpy.all(numpy.isfinite(v)):
                continue

            # Prefer the left neighbor as reference, else the one above;
            # with no tracked neighbor, re-seed by imaginary part.
            if j > 0 and numpy.all(numpy.isfinite(sheets[i, j - 1, :])):
                ref = sheets[i, j - 1, :]
            elif i > 0 and numpy.all(numpy.isfinite(sheets[i - 1, j, :])):
                ref = sheets[i - 1, j, :]
            else:
                sheets[i, j, :] = sort_seed(v)
                continue

            # Choose the permutation minimizing total L1 distance to ref.
            v_perm = v[perms]
            cost = numpy.abs(v_perm - ref[None, :]).sum(axis=1)
            p = perms[int(numpy.argmin(cost))]
            sheets[i, j, :] = v[p]

    if z is not None:
        z = numpy.asarray(z)
        if z.shape != (n_y, n_x):
            raise ValueError("z must have shape (n_y, n_x) matching m_all.")
        # Score each sheet by its mean Im over the upper half plane and
        # move the top scorer to index 0.
        mask_up = numpy.imag(z) > 0.0
        scores = numpy.full(s, -numpy.inf, dtype=numpy.float64)
        for r in range(s):
            v = sheets[:, :, r]
            vv = v[mask_up]
            finite = numpy.isfinite(vv)
            if numpy.any(finite):
                scores[r] = float(numpy.mean(numpy.imag(vv[finite])))
        r_phys = int(numpy.argmax(scores))
        perm = [r_phys] + [r for r in range(s) if r != r_phys]
        sheets = sheets[:, :, perm]

    return sheets
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
# =========================
|
|
114
|
+
# infer m1 partners on cuts
|
|
115
|
+
# =========================
|
|
116
|
+
|
|
117
|
+
def infer_m1_partners_on_cuts(z, sheets, support):
    """
    Infer which sheet the physical sheet m1 swaps with across each cut.

    For every support interval, the sheets are sampled at the interval
    midpoint on the grid rows nearest the real axis (just above and just
    below), and the non-physical sheet whose values best match m1 across
    the axis is selected.

    Parameters
    ----------
    z : ndarray, shape (n_y, n_x)
        Complex grid; columns share Re(z) and rows share Im(z).
        NOTE(review): the row selection assumes rows are ordered by
        increasing Im(z) — confirm against callers.
    sheets : sequence of ndarray
        Sheet arrays [m1, m2, ...] on the same z-grid, physical sheet
        first. At least two sheets are required.
    support : list of (float, float)
        Support intervals (cuts) on the real axis.

    Returns
    -------
    partners : list of int
        For each interval, the index (>= 1) of the sheet m1 continues
        into across that cut. E.g. [1, 2] means the first cut swaps m1
        with sheets[1] and the second with sheets[2].
    """
    X = numpy.real(z[0, :])
    ycol = numpy.imag(z[:, 0])

    # Rows nearest the real axis: first strictly above, last strictly below.
    i_up = numpy.where(ycol > 0)[0][0]
    i_dn = numpy.where(ycol < 0)[0][-1]

    n_sheets = len(sheets)
    partners = []
    for (a, b) in support:
        # Sample at the midpoint of the cut, away from the edges.
        x0 = 0.5 * (a + b)
        j = int(numpy.argmin(numpy.abs(X - x0)))

        m1_up = sheets[0][i_up, j]
        m1_dn = sheets[0][i_dn, j]

        # Distance from m1 on each side to every sheet on the other side.
        d_up_to_dn = [abs(m1_up - sheets[k][i_dn, j])
                      for k in range(n_sheets)]
        d_dn_to_up = [abs(m1_dn - sheets[k][i_up, j])
                      for k in range(n_sheets)]

        # Ignore k=0 (m1 trivially matches itself away from cuts) and take
        # the best among all remaining sheets. Generalized from the former
        # hard-coded [1, 2], which assumed exactly three sheets and raised
        # IndexError for two.
        k1 = min(range(1, n_sheets),
                 key=lambda k: d_up_to_dn[k] + d_dn_to_up[k])
        partners.append(k1)

    return partners
|