setiastrosuitepro 1.6.12__py3-none-any.whl → 1.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of setiastrosuitepro might be problematic.
- setiastro/images/3dplanet.png +0 -0
- setiastro/images/TextureClarity.svg +56 -0
- setiastro/images/narrowbandnormalization.png +0 -0
- setiastro/images/planetarystacker.png +0 -0
- setiastro/saspro/__init__.py +9 -8
- setiastro/saspro/__main__.py +326 -285
- setiastro/saspro/_generated/build_info.py +2 -2
- setiastro/saspro/aberration_ai.py +128 -13
- setiastro/saspro/aberration_ai_preset.py +29 -3
- setiastro/saspro/astrospike_python.py +45 -3
- setiastro/saspro/blink_comparator_pro.py +116 -71
- setiastro/saspro/curve_editor_pro.py +72 -22
- setiastro/saspro/curves_preset.py +249 -47
- setiastro/saspro/doc_manager.py +4 -1
- setiastro/saspro/gui/main_window.py +326 -46
- setiastro/saspro/gui/mixins/file_mixin.py +41 -18
- setiastro/saspro/gui/mixins/menu_mixin.py +9 -0
- setiastro/saspro/gui/mixins/toolbar_mixin.py +123 -7
- setiastro/saspro/histogram.py +179 -7
- setiastro/saspro/imageops/narrowband_normalization.py +816 -0
- setiastro/saspro/imageops/serloader.py +1429 -0
- setiastro/saspro/layers.py +186 -10
- setiastro/saspro/layers_dock.py +198 -5
- setiastro/saspro/legacy/image_manager.py +10 -4
- setiastro/saspro/legacy/numba_utils.py +1 -1
- setiastro/saspro/live_stacking.py +24 -4
- setiastro/saspro/multiscale_decomp.py +30 -17
- setiastro/saspro/narrowband_normalization.py +1618 -0
- setiastro/saspro/planetprojection.py +3854 -0
- setiastro/saspro/remove_green.py +1 -1
- setiastro/saspro/resources.py +8 -0
- setiastro/saspro/rgbalign.py +456 -12
- setiastro/saspro/save_options.py +45 -13
- setiastro/saspro/ser_stack_config.py +102 -0
- setiastro/saspro/ser_stacker.py +2327 -0
- setiastro/saspro/ser_stacker_dialog.py +1865 -0
- setiastro/saspro/ser_tracking.py +228 -0
- setiastro/saspro/serviewer.py +1773 -0
- setiastro/saspro/sfcc.py +298 -64
- setiastro/saspro/shortcuts.py +14 -7
- setiastro/saspro/stacking_suite.py +21 -6
- setiastro/saspro/stat_stretch.py +179 -31
- setiastro/saspro/subwindow.py +38 -5
- setiastro/saspro/texture_clarity.py +593 -0
- setiastro/saspro/widgets/resource_monitor.py +122 -74
- {setiastrosuitepro-1.6.12.dist-info → setiastrosuitepro-1.7.3.dist-info}/METADATA +3 -2
- {setiastrosuitepro-1.6.12.dist-info → setiastrosuitepro-1.7.3.dist-info}/RECORD +51 -37
- {setiastrosuitepro-1.6.12.dist-info → setiastrosuitepro-1.7.3.dist-info}/WHEEL +0 -0
- {setiastrosuitepro-1.6.12.dist-info → setiastrosuitepro-1.7.3.dist-info}/entry_points.txt +0 -0
- {setiastrosuitepro-1.6.12.dist-info → setiastrosuitepro-1.7.3.dist-info}/licenses/LICENSE +0 -0
- {setiastrosuitepro-1.6.12.dist-info → setiastrosuitepro-1.7.3.dist-info}/licenses/license.txt +0 -0
@@ -0,0 +1,2327 @@
# src/setiastro/saspro/ser_stacker.py
from __future__ import annotations
import os
import threading

from concurrent.futures import ThreadPoolExecutor, as_completed

from dataclasses import dataclass
from typing import Optional, Tuple, List, Dict, Any

import numpy as np

import cv2
cv2.setNumThreads(1)

from setiastro.saspro.imageops.serloader import SERReader
from setiastro.saspro.ser_stack_config import SERStackConfig
from setiastro.saspro.ser_tracking import PlanetaryTracker, SurfaceTracker, _to_mono01
from setiastro.saspro.imageops.serloader import open_planetary_source, PlanetaryFrameSource

_BAYER_TO_CV2 = {
    "RGGB": cv2.COLOR_BayerRG2RGB,
    "BGGR": cv2.COLOR_BayerBG2RGB,
    "GRBG": cv2.COLOR_BayerGR2RGB,
    "GBRG": cv2.COLOR_BayerGB2RGB,
}

def _cfg_bayer_pattern(cfg) -> str | None:
    # cfg.bayer_pattern might be missing in older saved projects; be defensive
    return getattr(cfg, "bayer_pattern", None)


def _get_frame(src, idx: int, *, roi, debayer: bool, to_float01: bool, force_rgb: bool, bayer_pattern: str | None):
    """
    Drop-in wrapper:
      - passes cfg.bayer_pattern down to sources that support it
      - stays compatible with sources whose get_frame() doesn't accept bayer_pattern yet
    """
    try:
        return src.get_frame(
            int(idx),
            roi=roi,
            debayer=debayer,
            to_float01=to_float01,
            force_rgb=force_rgb,
            bayer_pattern=bayer_pattern,
        )
    except TypeError:
        # Back-compat: older PlanetaryFrameSource implementations
        return src.get_frame(
            int(idx),
            roi=roi,
            debayer=debayer,
            to_float01=to_float01,
            force_rgb=force_rgb,
        )


@dataclass
class AnalyzeResult:
    frames_total: int
    roi_used: Optional[Tuple[int, int, int, int]]
    track_mode: str
    quality: np.ndarray        # (N,) float32 higher=better
    dx: np.ndarray             # (N,) float32
    dy: np.ndarray             # (N,) float32
    conf: np.ndarray           # (N,) float32 0..1 (final conf used by stacking)
    order: np.ndarray          # (N,) int indices sorted by quality desc
    ref_mode: str              # "best_frame" | "best_stack"
    ref_count: int
    ref_image: np.ndarray      # float32 [0..1], ROI-sized
    ap_centers: Optional[np.ndarray] = None   # (M,2) int32 in ROI coords
    ap_size: int = 64
    ap_multiscale: bool = False

    # ✅ NEW: surface anchor confidence (coarse tracker)
    coarse_conf: Optional[np.ndarray] = None   # (N,) float32 0..1


@dataclass
class FrameEval:
    idx: int
    score: float
    dx: float
    dy: float
    conf: float

def _print_surface_debug(
    *,
    dx: np.ndarray,
    dy: np.ndarray,
    conf: np.ndarray,
    coarse_conf: np.ndarray | None,
    floor: float = 0.05,
    prefix: str = "[SER][Surface]"
) -> None:
    try:
        dx = np.asarray(dx, dtype=np.float32)
        dy = np.asarray(dy, dtype=np.float32)
        conf = np.asarray(conf, dtype=np.float32)

        dx_min = float(np.min(dx)) if dx.size else 0.0
        dx_max = float(np.max(dx)) if dx.size else 0.0
        dy_min = float(np.min(dy)) if dy.size else 0.0
        dy_max = float(np.max(dy)) if dy.size else 0.0

        conf_mean = float(np.mean(conf)) if conf.size else 0.0
        conf_min = float(np.min(conf)) if conf.size else 0.0

        msg = (
            f"{prefix} dx[min,max]=({dx_min:.2f},{dx_max:.2f}) "
            f"dy[min,max]=({dy_min:.2f},{dy_max:.2f}) "
            f"conf[mean,min]=({conf_mean:.3f},{conf_min:.3f})"
        )

        if coarse_conf is not None:
            cc = np.asarray(coarse_conf, dtype=np.float32)
            cc_mean = float(np.mean(cc)) if cc.size else 0.0
            cc_min = float(np.min(cc)) if cc.size else 0.0
            cc_bad = float(np.mean(cc < 0.2)) if cc.size else 0.0
            msg += f" coarse_conf[mean,min]=({cc_mean:.3f},{cc_min:.3f}) frac<0.2={cc_bad:.2%}"

        if conf_mean <= floor + 1e-6:
            msg += f" ⚠ conf.mean near floor ({floor}); alignment likely failing"
        print(msg)
    except Exception as e:
        print(f"{prefix} debug print failed: {e}")


def _clamp_roi_in_bounds(roi: Tuple[int, int, int, int], w: int, h: int) -> Tuple[int, int, int, int]:
    x, y, rw, rh = [int(v) for v in roi]
    x = max(0, min(w - 1, x))
    y = max(0, min(h - 1, y))
    rw = max(1, min(w - x, rw))
    rh = max(1, min(h - y, rh))
    return x, y, rw, rh

def _grad_img(m: np.ndarray) -> np.ndarray:
    """Simple, robust edge image for SSD refine."""
    m = m.astype(np.float32, copy=False)
    if cv2 is None:
        # fallback: finite differences
        gx = np.zeros_like(m); gx[:, 1:] = m[:, 1:] - m[:, :-1]
        gy = np.zeros_like(m); gy[1:, :] = m[1:, :] - m[:-1, :]
        g = np.abs(gx) + np.abs(gy)
        g -= float(g.mean())
        s = float(g.std()) + 1e-6
        return g / s

    gx = cv2.Sobel(m, cv2.CV_32F, 1, 0, ksize=3)
    gy = cv2.Sobel(m, cv2.CV_32F, 0, 1, ksize=3)
    g = cv2.magnitude(gx, gy)
    g -= float(g.mean())
    s = float(g.std()) + 1e-6
    return (g / s).astype(np.float32, copy=False)

def _ssd_prepare_ref(ref_m: np.ndarray, crop: float = 0.80):
    """
    Precompute reference gradient + crop window once.

    Returns:
      rg  : full reference gradient image (float32)
      rgc : cropped view of rg
      sl  : (y0,y1,x0,x1) crop slices
    """
    ref_m = ref_m.astype(np.float32, copy=False)
    rg = _grad_img(ref_m)  # compute ONCE

    H, W = rg.shape[:2]
    cfx = max(8, int(W * (1.0 - float(crop)) * 0.5))
    cfy = max(8, int(H * (1.0 - float(crop)) * 0.5))
    x0, x1 = cfx, W - cfx
    y0, y1 = cfy, H - cfy

    rgc = rg[y0:y1, x0:x1]  # view
    return rg, rgc, (y0, y1, x0, x1)

def _subpixel_quadratic_1d(vm: float, v0: float, vp: float) -> float:
    """
    Given SSD at (-1,0,+1): (vm, v0, vp), return vertex offset in [-0.5,0.5]-ish.
    Works for minimizing SSD.
    """
    denom = (vm - 2.0 * v0 + vp)
    if abs(denom) < 1e-12:
        return 0.0
    # vertex of parabola fit through -1,0,+1
    t = 0.5 * (vm - vp) / denom
    return float(np.clip(t, -0.75, 0.75))
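# --- Editor's note (illustrative, not part of the shipped file) ---
# A quick worked example of the parabola-vertex fit above, assuming SSD
# samples at offsets -1, 0, +1 of vm=0.9, v0=0.5, vp=0.7:
#   denom = vm - 2*v0 + vp = 0.9 - 1.0 + 0.7 = 0.6
#   t     = 0.5 * (vm - vp) / denom = 0.5 * 0.2 / 0.6 ≈ +0.17
# i.e. the estimated minimum lies ~0.17 px toward the +1 sample; the clip to
# ±0.75 guards against a near-degenerate (flat) parabola.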


def _ssd_confidence_prepared(
    rgc: np.ndarray,
    cgc0: np.ndarray,
    dx_i: int,
    dy_i: int,
) -> float:
    """
    Compute SSD between rgc and cgc0 shifted by (dx_i,dy_i) using slicing overlap.
    Returns SSD (lower is better).

    NOTE: This is integer-only and extremely fast (no warps).
    """
    H, W = rgc.shape[:2]

    # Overlap slices for rgc and shifted cgc0
    x0r = max(0, dx_i)
    x1r = min(W, W + dx_i)
    y0r = max(0, dy_i)
    y1r = min(H, H + dy_i)

    x0c = max(0, -dx_i)
    x1c = min(W, W - dx_i)
    y0c = max(0, -dy_i)
    y1c = min(H, H - dy_i)

    rr = rgc[y0r:y1r, x0r:x1r]
    cc = cgc0[y0c:y1c, x0c:x1c]

    d = rr - cc
    return float(np.mean(d * d))


def _ssd_confidence(
    ref_m: np.ndarray,
    cur_m: np.ndarray,
    dx: float,
    dy: float,
    *,
    crop: float = 0.80,
) -> float:
    """
    Original API: confidence from gradient SSD, higher=better (0..1).

    Optimized:
      - computes ref grad once per call (still OK if used standalone)
      - uses one warp for (dx,dy)
      - no extra work beyond necessary

    For iterative search, use _refine_shift_ssd() which avoids redoing work.
    """
    ref_m = ref_m.astype(np.float32, copy=False)
    cur_m = cur_m.astype(np.float32, copy=False)

    # shift current by the proposed shift
    cur_s = _shift_image(cur_m, float(dx), float(dy))

    rg, rgc, sl = _ssd_prepare_ref(ref_m, crop=crop)
    y0, y1, x0, x1 = sl

    cg = _grad_img(cur_s)
    cgc = cg[y0:y1, x0:x1]

    d = rgc - cgc
    ssd = float(np.mean(d * d))

    scale = 0.002
    conf = float(np.exp(-ssd / max(1e-12, scale)))
    return float(np.clip(conf, 0.0, 1.0))
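# --- Editor's note (illustrative, not part of the shipped file) ---
# The SSD -> confidence mapping above is conf = exp(-ssd / scale) with
# scale = 0.002 applied to the mean squared gradient difference, e.g.:
#   ssd = 0.000  -> conf = 1.00
#   ssd = 0.001  -> conf ≈ 0.61
#   ssd = 0.002  -> conf ≈ 0.37
#   ssd = 0.010  -> conf ≈ 0.007
# so confidence decays quickly once the normalized gradients disagree.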


def _refine_shift_ssd(
    ref_m: np.ndarray,
    cur_m: np.ndarray,
    dx0: float,
    dy0: float,
    *,
    radius: int = 10,
    crop: float = 0.80,
    bruteforce: bool = False,
    max_steps: int | None = None,
) -> tuple[float, float, float]:
    """
    Returns (dx_refine, dy_refine, conf) where you ADD refine to (dx0,dy0).

    CPU-optimized:
      - precompute ref gradient crop once
      - apply (dx0,dy0) shift ONCE
      - compute gradient ONCE for shifted cur
      - evaluate integer candidates via slicing overlap SSD (no warps)

    If bruteforce=True, does full window scan in [-r,r]^2 (fast).
    Otherwise does 8-neighbor hill-climb over integer offsets (very fast).

    Optional subpixel polish:
      - after choosing best integer (best_dx,best_dy), do a tiny separable quadratic
        fit along x and y using SSD at +/-1 around the best integer.
      - does NOT require any new gradients/warps (just 4 extra SSD evals).
    """
    r = int(max(0, radius))
    if r == 0:
        # nothing to do; just compute confidence at dx0/dy0
        c = _ssd_confidence(ref_m, cur_m, dx0, dy0, crop=crop)
        return 0.0, 0.0, float(c)

    # Prepare ref grad crop ONCE
    _, rgc, sl = _ssd_prepare_ref(ref_m, crop=crop)
    y0, y1, x0, x1 = sl

    # Shift cur by the current estimate ONCE, then gradient ONCE
    cur_m = cur_m.astype(np.float32, copy=False)
    cur0 = _shift_image(cur_m, float(dx0), float(dy0))
    cg0 = _grad_img(cur0)
    cgc0 = cg0[y0:y1, x0:x1]

    # Helper: parabola vertex for minimizing SSD, using (-1,0,+1) samples
    def _quad_min_offset(vm: float, v0: float, vp: float) -> float:
        denom = (vm - 2.0 * v0 + vp)
        if abs(denom) < 1e-12:
            return 0.0
        t = 0.5 * (vm - vp) / denom
        return float(np.clip(t, -0.75, 0.75))

    if bruteforce:
        # NOTE: your bruteforce path currently includes a subpixel step already.
        # If you want to keep using that exact implementation, just call it:
        dxr, dyr, conf = _refine_shift_ssd_bruteforce(ref_m, cur_m, dx0, dy0, radius=r, crop=crop)
        return float(dxr), float(dyr), float(conf)

    # Hill-climb in integer space minimizing SSD
    if max_steps is None:
        max_steps = max(1, min(r, 6))  # small cap helps speed; tune if you want

    best_dx = 0
    best_dy = 0
    best_ssd = _ssd_confidence_prepared(rgc, cgc0, 0, 0)

    neigh = ((-1,0),(1,0),(0,-1),(0,1),(-1,-1),(-1,1),(1,-1),(1,1))

    for _ in range(int(max_steps)):
        improved = False
        for sx, sy in neigh:
            cand_dx = best_dx + sx
            cand_dy = best_dy + sy
            if abs(cand_dx) > r or abs(cand_dy) > r:
                continue

            ssd = _ssd_confidence_prepared(rgc, cgc0, cand_dx, cand_dy)
            if ssd < best_ssd:
                best_ssd = ssd
                best_dx = cand_dx
                best_dy = cand_dy
                improved = True

        if not improved:
            break

    # ---- subpixel quadratic polish around best integer (cheap) ----
    # Uses SSD at +/-1 around best integer in X and Y (separable).
    dx_sub = 0.0
    dy_sub = 0.0
    if r >= 1:
        # X samples at (best_dx-1, best_dy), (best_dx, best_dy), (best_dx+1, best_dy)
        if abs(best_dx - 1) <= r:
            s_xm = _ssd_confidence_prepared(rgc, cgc0, best_dx - 1, best_dy)
        else:
            s_xm = best_ssd
        s_x0 = best_ssd
        if abs(best_dx + 1) <= r:
            s_xp = _ssd_confidence_prepared(rgc, cgc0, best_dx + 1, best_dy)
        else:
            s_xp = best_ssd
        dx_sub = _quad_min_offset(s_xm, s_x0, s_xp)

        # Y samples at (best_dx, best_dy-1), (best_dx, best_dy), (best_dx, best_dy+1)
        if abs(best_dy - 1) <= r:
            s_ym = _ssd_confidence_prepared(rgc, cgc0, best_dx, best_dy - 1)
        else:
            s_ym = best_ssd
        s_y0 = best_ssd
        if abs(best_dy + 1) <= r:
            s_yp = _ssd_confidence_prepared(rgc, cgc0, best_dx, best_dy + 1)
        else:
            s_yp = best_ssd
        dy_sub = _quad_min_offset(s_ym, s_y0, s_yp)

    best_dx_f = float(best_dx) + float(dx_sub)
    best_dy_f = float(best_dy) + float(dy_sub)

    # Confidence: keep based on best *integer* SSD (no subpixel warp needed)
    scale = 0.002
    conf = float(np.exp(-best_ssd / max(1e-12, scale)))
    conf = float(np.clip(conf, 0.0, 1.0))

    return float(best_dx_f), float(best_dy_f), float(conf)



def _refine_shift_ssd_bruteforce(
    ref_m: np.ndarray,
    cur_m: np.ndarray,
    dx0: float,
    dy0: float,
    *,
    radius: int = 2,
    crop: float = 0.80,
) -> tuple[float, float, float]:
    """
    Full brute-force scan in [-radius,+radius]^2, but optimized:
      - shift by (dx0,dy0) ONCE
      - compute gradients ONCE
      - evaluate candidates via slicing overlap SSD (no warps)
      - keep your separable quadratic subpixel fit
    """
    ref_m = ref_m.astype(np.float32, copy=False)
    cur_m = cur_m.astype(np.float32, copy=False)

    r = int(max(0, radius))
    if r == 0:
        c = _ssd_confidence(ref_m, cur_m, dx0, dy0, crop=crop)
        return 0.0, 0.0, float(c)

    # Apply current estimate once
    cur0 = _shift_image(cur_m, float(dx0), float(dy0))

    # Gradients once
    rg = _grad_img(ref_m)
    cg0 = _grad_img(cur0)

    H, W = rg.shape[:2]
    cfx = max(8, int(W * (1.0 - float(crop)) * 0.5))
    cfy = max(8, int(H * (1.0 - float(crop)) * 0.5))
    x0, x1 = cfx, W - cfx
    y0, y1 = cfy, H - cfy

    rgc = rg[y0:y1, x0:x1]
    cgc0 = cg0[y0:y1, x0:x1]

    # brute-force integer search
    best = (0, 0)
    best_ssd = float("inf")
    ssds: dict[tuple[int, int], float] = {}

    for j in range(-r, r + 1):
        for i in range(-r, r + 1):
            ssd = _ssd_confidence_prepared(rgc, cgc0, i, j)
            ssds[(i, j)] = ssd
            if ssd < best_ssd:
                best_ssd = ssd
                best = (i, j)

    bx, by = best

    # Subpixel quadratic fit (separable) if neighbors exist
    def _quad_peak(vm, v0, vp):
        denom = (vm - 2.0 * v0 + vp)
        if abs(denom) < 1e-12:
            return 0.0
        return 0.5 * (vm - vp) / denom

    dx_sub = 0.0
    dy_sub = 0.0
    if (bx - 1, by) in ssds and (bx + 1, by) in ssds:
        dx_sub = _quad_peak(ssds[(bx - 1, by)], ssds[(bx, by)], ssds[(bx + 1, by)])
    if (bx, by - 1) in ssds and (bx, by + 1) in ssds:
        dy_sub = _quad_peak(ssds[(bx, by - 1)], ssds[(bx, by)], ssds[(bx, by + 1)])

    dxr = float(bx + np.clip(dx_sub, -0.75, 0.75))
    dyr = float(by + np.clip(dy_sub, -0.75, 0.75))

    # Confidence: use your “sharpness” idea (median neighbor vs best)
    neigh = [v for (k, v) in ssds.items() if k != (bx, by)]
    neigh_med = float(np.median(np.asarray(neigh, np.float32))) if neigh else best_ssd
    sharp = max(0.0, neigh_med - best_ssd)
    conf = float(np.clip(sharp / max(1e-6, neigh_med), 0.0, 1.0))

    return dxr, dyr, conf

def _bandpass(m: np.ndarray) -> np.ndarray:
    """Illumination-robust image for tracking (float32)."""
    m = m.astype(np.float32, copy=False)

    # remove large-scale illumination (terminator gradient)
    lo = cv2.GaussianBlur(m, (0, 0), 6.0)
    hi = cv2.GaussianBlur(m, (0, 0), 1.2)
    bp = hi - lo

    # normalize
    bp -= float(bp.mean())
    s = float(bp.std()) + 1e-6
    bp = bp / s

    # window to reduce FFT edge artifacts
    hann_y = np.hanning(bp.shape[0]).astype(np.float32)
    hann_x = np.hanning(bp.shape[1]).astype(np.float32)
    bp *= (hann_y[:, None] * hann_x[None, :])

    return bp

def _reject_ap_outliers(ap_dx: np.ndarray, ap_dy: np.ndarray, ap_cf: np.ndarray, *, z: float = 3.5) -> np.ndarray:
    """
    Return a boolean mask of APs to keep based on MAD distance from median.
    """
    dx = np.asarray(ap_dx, np.float32)
    dy = np.asarray(ap_dy, np.float32)
    cf = np.asarray(ap_cf, np.float32)

    good = cf > 0.15
    if not np.any(good):
        return good

    dxg = dx[good]
    dyg = dy[good]

    mx = float(np.median(dxg))
    my = float(np.median(dyg))

    rx = np.abs(dxg - mx)
    ry = np.abs(dyg - my)

    madx = float(np.median(rx)) + 1e-6
    mady = float(np.median(ry)) + 1e-6

    zx = rx / madx
    zy = ry / mady

    keep_g = (zx < z) & (zy < z)
    keep = np.zeros_like(good)
    keep_idx = np.where(good)[0]
    keep[keep_idx] = keep_g
    return keep
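# --- Editor's note (illustrative, not part of the shipped file) ---
# Sketch of how the MAD rejection above behaves, assuming five alignment
# points whose confidences are all above the 0.15 floor:
#   ap_dx residuals from the median 2.1:  [0.0, 0.1, 0.2, 0.1, 8.9]  (dx = 2.1, 2.0, 1.9, 2.2, 11.0)
#   MAD ≈ 0.1, so z-scores ≈ [0, 1, 2, 1, 89]
# With z = 3.5 only the 11.0-pixel outlier is dropped; the same test runs
# independently on ap_dy and the two keep-masks are ANDed.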


def _coarse_surface_ref_locked(
    source_obj,
    *,
    n: int,
    roi,
    roi_used=None,
    debayer: bool,
    to_rgb: bool,
    bayer_pattern: Optional[str] = None,
    progress_cb=None,
    progress_every: int = 25,
    # tuning:
    down: int = 2,
    template_size: int = 256,
    search_radius: int = 96,
    bandpass: bool = True,
    # ✅ NEW: parallel coarse
    workers: int | None = None,
    stride: int = 16,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Surface coarse tracking that DOES NOT DRIFT:
      - Locks to frame0 reference (in roi=roi_track coords).
      - Uses NCC + subpixel phaseCorr.
      - Optional parallelization by chunking time into segments of length=stride.
        Each segment runs sequentially (keeps pred window), segments run in parallel.
    """
    if cv2 is None:
        dx = np.zeros((n,), np.float32)
        dy = np.zeros((n,), np.float32)
        cc = np.ones((n,), np.float32)
        return dx, dy, cc

    dx = np.zeros((n,), dtype=np.float32)
    dy = np.zeros((n,), dtype=np.float32)
    cc = np.zeros((n,), dtype=np.float32)

    def _downN(m: np.ndarray) -> np.ndarray:
        if down <= 1:
            return m.astype(np.float32, copy=False)
        H, W = m.shape[:2]
        return cv2.resize(
            m,
            (max(2, W // down), max(2, H // down)),
            interpolation=cv2.INTER_AREA,
        ).astype(np.float32, copy=False)

    def _pick_anchor_center_ds(W: int, H: int) -> tuple[int, int]:
        cx = W // 2
        cy = H // 2
        if roi_used is None or roi is None:
            return int(cx), int(cy)
        try:
            xt, yt, wt, ht = [int(v) for v in roi]
            xu, yu, wu, hu = [int(v) for v in roi_used]
            cux = xu + (wu * 0.5)
            cuy = yu + (hu * 0.5)
            cx_full = cux - xt
            cy_full = cuy - yt
            cx = int(round(cx_full / max(1, int(down))))
            cy = int(round(cy_full / max(1, int(down))))
            cx = max(0, min(W - 1, cx))
            cy = max(0, min(H - 1, cy))
        except Exception:
            pass
        return int(cx), int(cy)

    # ---------------------------
    # Prep ref/template once
    # ---------------------------
    src0, owns0 = _ensure_source(source_obj, cache_items=2)
    try:
        img0 = _get_frame(src0, 0, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)

        ref0 = _to_mono01(img0).astype(np.float32, copy=False)
        ref0 = _downN(ref0)
        ref0p = _bandpass(ref0) if bandpass else (ref0 - float(ref0.mean()))

        H, W = ref0p.shape[:2]
        ts = int(max(64, min(template_size, min(H, W) - 4)))
        half = ts // 2

        cx0, cy0 = _pick_anchor_center_ds(W, H)
        rx0 = max(0, min(W - ts, cx0 - half))
        ry0 = max(0, min(H - ts, cy0 - half))
        ref_t = ref0p[ry0:ry0 + ts, rx0:rx0 + ts].copy()
    finally:
        if owns0:
            try:
                src0.close()
            except Exception:
                pass

    dx[0] = 0.0
    dy[0] = 0.0
    cc[0] = 1.0

    if progress_cb:
        progress_cb(0, n, "Surface: coarse (ref-locked NCC+subpix)…")

    # If no workers requested (or too small), fall back to sequential
    if workers is None:
        cpu = os.cpu_count() or 4
        workers = max(1, min(cpu, 48))
    workers = int(max(1, workers))
    stride = int(max(4, stride))

    # ---------------------------
    # Core "one frame" matcher
    # ---------------------------
    def _match_one(curp: np.ndarray, pred_x: float, pred_y: float, r: int) -> tuple[float, float, float, float, float]:
        # returns (mx_ds, my_ds, dx_full, dy_full, conf)
        x0 = int(max(0, min(W - 1, pred_x - r)))
        y0 = int(max(0, min(H - 1, pred_y - r)))
        x1 = int(min(W, pred_x + r + ts))
        y1 = int(min(H, pred_y + r + ts))

        win = curp[y0:y1, x0:x1]
        if win.shape[0] < ts or win.shape[1] < ts:
            return float(pred_x), float(pred_y), 0.0, 0.0, 0.0

        res = cv2.matchTemplate(win, ref_t, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(res)
        conf_ncc = float(np.clip(max_val, 0.0, 1.0))

        mx_ds = float(x0 + max_loc[0])
        my_ds = float(y0 + max_loc[1])

        # subpix refine on the matched patch
        mx_i = int(round(mx_ds))
        my_i = int(round(my_ds))
        cur_t = curp[my_i:my_i + ts, mx_i:mx_i + ts]
        if cur_t.shape == ref_t.shape:
            (sdx, sdy), resp = cv2.phaseCorrelate(ref_t.astype(np.float32), cur_t.astype(np.float32))
            sub_dx = float(sdx)
            sub_dy = float(sdy)
            conf_pc = float(np.clip(resp, 0.0, 1.0))
        else:
            sub_dx = 0.0
            sub_dy = 0.0
            conf_pc = 0.0

        dx_ds = float(rx0 - mx_ds) + sub_dx
        dy_ds = float(ry0 - my_ds) + sub_dy
        dx_full = float(dx_ds * down)
        dy_full = float(dy_ds * down)

        conf = float(np.clip(0.65 * conf_ncc + 0.35 * conf_pc, 0.0, 1.0))
        return float(mx_ds), float(my_ds), dx_full, dy_full, conf

    # ---------------------------
    # Keyframe boundary pass (sequential)
    # ---------------------------
    boundaries = list(range(0, n, stride))
    start_pred = {}  # b -> (pred_x, pred_y)
    start_pred[0] = (float(rx0), float(ry0))

    # We use a slightly larger radius for boundary frames to be extra safe
    r_key = int(max(16, int(search_radius) * 2))

    srck, ownsk = _ensure_source(source_obj, cache_items=2)
    try:
        pred_x, pred_y = float(rx0), float(ry0)
        for b in boundaries[1:]:
            img = _get_frame(srck, b, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)

            cur = _to_mono01(img).astype(np.float32, copy=False)
            cur = _downN(cur)
            curp = _bandpass(cur) if bandpass else (cur - float(cur.mean()))

            mx_ds, my_ds, dx_b, dy_b, conf_b = _match_one(curp, pred_x, pred_y, r_key)

            # store boundary predictor (template top-left in this frame)
            start_pred[b] = (mx_ds, my_ds)

            # update for next boundary
            pred_x, pred_y = mx_ds, my_ds

            # also fill boundary output immediately (optional but nice)
            dx[b] = dx_b
            dy[b] = dy_b
            cc[b] = conf_b
            if conf_b < 0.15 and b > 0:
                dx[b] = dx[b - 1]
                dy[b] = dy[b - 1]
    finally:
        if ownsk:
            try:
                srck.close()
            except Exception:
                pass

    # ---------------------------
    # Parallel per-chunk scan (each chunk sequential)
    # ---------------------------
    r = int(max(16, search_radius))

    def _run_chunk(b: int, e: int) -> int:
        src, owns = _ensure_source(source_obj, cache_items=0)
        try:
            pred_x, pred_y = start_pred.get(b, (float(rx0), float(ry0)))
            # if boundary already computed above, keep it; start after b
            i0 = b
            if b in start_pred and b != 0:
                i0 = b + 1  # boundary already solved with r_key

            if i0 == 0:
                i0 = 1
            for i in range(i0, e):
                if i in start_pred:
                    pred_x, pred_y = start_pred[i]
                    continue

                img = _get_frame(src, i, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
                cur = _to_mono01(img).astype(np.float32, copy=False)
                cur = _downN(cur)
                curp = _bandpass(cur) if bandpass else (cur - float(cur.mean()))

                mx_ds, my_ds, dx_i, dy_i, conf_i = _match_one(curp, pred_x, pred_y, r)

                dx[i] = dx_i
                dy[i] = dy_i
                cc[i] = conf_i

                pred_x, pred_y = mx_ds, my_ds

                if conf_i < 0.15 and i > 0:
                    dx[i] = dx[i - 1]
                    dy[i] = dy[i - 1]
            return (e - b)
        finally:
            if owns:
                try:
                    src.close()
                except Exception:
                    pass

    if workers <= 1 or n <= stride * 2:
        # small job: just do sequential scan exactly like before
        src, owns = _ensure_source(source_obj, cache_items=2)
        try:
            pred_x, pred_y = float(rx0), float(ry0)
            for i in range(1, n):
                img = _get_frame(src, i, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
                cur = _to_mono01(img).astype(np.float32, copy=False)
                cur = _downN(cur)
                curp = _bandpass(cur) if bandpass else (cur - float(cur.mean()))

                mx_ds, my_ds, dx_i, dy_i, conf_i = _match_one(curp, pred_x, pred_y, r)
                dx[i] = dx_i
                dy[i] = dy_i
                cc[i] = conf_i
                pred_x, pred_y = mx_ds, my_ds

                if conf_i < 0.15:
                    dx[i] = dx[i - 1]
                    dy[i] = dy[i - 1]

                if progress_cb and (i % int(max(1, progress_every)) == 0 or i == n - 1):
                    progress_cb(i, n, "Surface: coarse (ref-locked NCC+subpix)…")
        finally:
            if owns:
                try:
                    src.close()
                except Exception:
                    pass
        return dx, dy, cc

    # Parallel chunks
    done = 0
    with ThreadPoolExecutor(max_workers=workers) as ex:
        futs = []
        for b in boundaries:
            e = min(n, b + stride)
            futs.append(ex.submit(_run_chunk, b, e))

        for fut in as_completed(futs):
            done += int(fut.result())
            if progress_cb:
                # best-effort: done is "frames processed" not exact index
                progress_cb(min(done, n - 1), n, "Surface: coarse (ref-locked NCC+subpix)…")

    return dx, dy, cc


def _shift_image(img01: np.ndarray, dx: float, dy: float) -> np.ndarray:
    """
    Shift image by (dx,dy) in pixel units. Positive dx shifts right, positive dy shifts down.
    Uses cv2.warpAffine if available; else nearest-ish roll (wrap) fallback.
    """
    if abs(dx) < 1e-6 and abs(dy) < 1e-6:
        return img01

    if cv2 is not None:
        # border replicate is usually better than constant black for planetary
        h, w = img01.shape[:2]
        M = np.array([[1.0, 0.0, dx],
                      [0.0, 1.0, dy]], dtype=np.float32)
        if img01.ndim == 2:
            return cv2.warpAffine(img01, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
        else:
            return cv2.warpAffine(img01, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    # very rough fallback (wraps!)
    rx = int(round(dx))
    ry = int(round(dy))
    out = np.roll(img01, shift=ry, axis=0)
    out = np.roll(out, shift=rx, axis=1)
    return out

def _downsample_mono01(img01: np.ndarray, max_dim: int = 512) -> np.ndarray:
    """
    Convert to mono and downsample for analysis/tracking. Returns float32 in [0,1].
    """
    m = _to_mono01(img01).astype(np.float32, copy=False)
    H, W = m.shape[:2]
    mx = int(max(1, max_dim))
    if max(H, W) <= mx:
        return m

    if cv2 is None:
        # crude fallback
        scale = mx / float(max(H, W))
        nh = max(2, int(round(H * scale)))
        nw = max(2, int(round(W * scale)))
        # nearest-ish
        ys = (np.linspace(0, H - 1, nh)).astype(np.int32)
        xs = (np.linspace(0, W - 1, nw)).astype(np.int32)
        return m[ys[:, None], xs[None, :]].astype(np.float32)

    scale = mx / float(max(H, W))
    nh = max(2, int(round(H * scale)))
    nw = max(2, int(round(W * scale)))
    return cv2.resize(m, (nw, nh), interpolation=cv2.INTER_AREA).astype(np.float32, copy=False)


def _phase_corr_shift(ref_m: np.ndarray, cur_m: np.ndarray) -> tuple[float, float, float]:
    """
    Returns (dx, dy, response) such that shifting cur by (dx,dy) aligns to ref.
    Uses cv2.phaseCorrelate if available.
    """
    if cv2 is None:
        return 0.0, 0.0, 1.0

    # phaseCorrelate expects float32/float64
    ref = ref_m.astype(np.float32, copy=False)
    cur = cur_m.astype(np.float32, copy=False)
    (dx, dy), resp = cv2.phaseCorrelate(ref, cur)  # shift cur -> ref
    return float(dx), float(dy), float(resp)

def _ensure_source(source, cache_items: int = 10) -> tuple[PlanetaryFrameSource, bool]:
    """
    Returns (src, owns_src)

    Accepts:
      - PlanetaryFrameSource-like object (duck typed: get_frame/meta/close)
      - path string
      - list/tuple of paths
    """
    # Already an opened source-like object
    if source is not None and hasattr(source, "get_frame") and hasattr(source, "meta") and hasattr(source, "close"):
        return source, False

    # allow tuple -> list
    if isinstance(source, tuple):
        source = list(source)

    src = open_planetary_source(source, cache_items=cache_items)
    return src, True
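# --- Editor's note (illustrative, not part of the shipped file) ---
# _ensure_source() duck-types "already opened" sources on three attributes:
# get_frame, meta and close. A minimal in-memory source along these lines
# would satisfy it (the class below is hypothetical, not from serloader):
#
#   import types
#   class _ArraySource:
#       def __init__(self, frames):  # frames: list of HxW float arrays in [0,1]
#           self._frames = frames
#           self.meta = types.SimpleNamespace(frames=len(frames),
#                                             height=frames[0].shape[0],
#                                             width=frames[0].shape[1])
#       def get_frame(self, idx, *, roi=None, debayer=True, to_float01=True,
#                     force_rgb=False, bayer_pattern=None):
#           return self._frames[idx]
#       def close(self):
#           pass
#
# Such an object is returned as (source, False), i.e. the caller keeps ownership
# and _ensure_source will not close it.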

def stack_ser(
    source: str | list[str] | PlanetaryFrameSource,
    *,
    roi=None,
    debayer: bool = True,
    keep_percent: float = 20.0,
    track_mode: str = "planetary",
    surface_anchor=None,
    to_rgb: bool = False,              # ✅ add this
    bayer_pattern: Optional[str] = None,   # ✅ strongly recommended since dialog passes it
    analysis: AnalyzeResult | None = None,
    local_warp: bool = True,
    max_dim: int = 512,
    progress_cb=None,
    cache_items: int = 10,
    workers: int | None = None,
    chunk_size: int | None = None,
    # ✅ NEW drizzle knobs
    drizzle_scale: float = 1.0,
    drizzle_pixfrac: float = 0.80,
    drizzle_kernel: str = "gaussian",
    drizzle_sigma: float = 0.0,
    keep_mask=None,
) -> tuple[np.ndarray, dict]:
    source_obj = source

    # ---- Worker count ----
    if workers is None:
        cpu = os.cpu_count() or 4
        workers = max(1, min(cpu, 48))

    if cv2 is not None:
        try:
            cv2.setNumThreads(1)
        except Exception:
            pass

    drizzle_scale = float(drizzle_scale)
    drizzle_on = drizzle_scale > 1.0001
    drizzle_pixfrac = float(drizzle_pixfrac)
    drizzle_kernel = str(drizzle_kernel).strip().lower()
    if drizzle_kernel not in ("square", "circle", "gaussian"):
        drizzle_kernel = "gaussian"
    drizzle_sigma = float(drizzle_sigma)

    # ---- Open once to get meta + first frame shape ----
    src0, owns0 = _ensure_source(source_obj, cache_items=cache_items)
    try:
        n = int(src0.meta.frames)
        keep_percent = max(0.1, min(100.0, float(keep_percent)))
        k = max(1, int(round(n * (keep_percent / 100.0))))

        if analysis is None or analysis.ref_image is None or analysis.ap_centers is None:
            raise ValueError("stack_ser expects analysis with ref_image + ap_centers (run Analyze first).")

        order = np.asarray(analysis.order, np.int32)
        keep_idx = order[:k].astype(np.int32, copy=False)

        # ✅ NEW: apply keep_mask (global mask in original frame index space)
        if keep_mask is not None:
            km = np.asarray(keep_mask, dtype=bool)
            if km.ndim != 1 or km.shape[0] != n:
                raise ValueError(f"keep_mask must be 1D bool of length {n}, got shape {km.shape}")
            keep_idx = keep_idx[km[keep_idx]]

        # Ensure at least one frame survives (or decide to error)
        if keep_idx.size == 0:
            raise ValueError("keep_mask rejected all frames in the Keep% set.")
        # reference / APs
        ref_img = analysis.ref_image.astype(np.float32, copy=False)
        ref_m = _to_mono01(ref_img).astype(np.float32, copy=False)
        ap_centers_all = np.asarray(analysis.ap_centers, np.int32)
        ap_size = int(getattr(analysis, "ap_size", 64) or 64)

        # frame shape for accumulator
        first = _get_frame(src0, int(keep_idx[0]), roi=roi, debayer=debayer, to_float01=True, force_rgb=False, bayer_pattern=bayer_pattern)
        acc_shape = first.shape  # (H,W) or (H,W,3)
    finally:
        if owns0:
            try:
                src0.close()
            except Exception:
                pass

    # ---- Progress aggregation (thread-safe) ----
    done_lock = threading.Lock()
    done_ct = 0
    total_ct = int(len(keep_idx))

    def _bump_progress(delta: int, phase: str = "Stack"):
        nonlocal done_ct
        if progress_cb is None:
            return
        with done_lock:
            done_ct += int(delta)
            d = done_ct
        progress_cb(d, total_ct, phase)

    # ---- Chunking ----
    idx_list = keep_idx.tolist()
    if chunk_size is None:
        chunk_size = max(8, int(np.ceil(len(idx_list) / float(workers * 2))))
    chunks: list[list[int]] = [idx_list[i:i + chunk_size] for i in range(0, len(idx_list), chunk_size)]

    if progress_cb:
        progress_cb(0, total_ct, "Stack")

    # ---- drizzle helpers ----
    if drizzle_on:
        from setiastro.saspro.legacy.numba_utils import (
            drizzle_deposit_numba_kernel_mono,
            drizzle_deposit_color_kernel,
            finalize_drizzle_2d,
            finalize_drizzle_3d,
        )

        # map kernel string -> code used by your numba
        kernel_code = {"square": 0, "circle": 1, "gaussian": 2}[drizzle_kernel]

        # If gaussian sigma isn't provided, use something tied to pixfrac.
        # Your numba interprets gaussian sigma as "sigma_out", and also enforces >= drop_shrink*0.5.
        if drizzle_sigma <= 1e-9:
            # a good practical default: sigma ~ pixfrac*0.5
            drizzle_sigma_eff = max(1e-3, float(drizzle_pixfrac) * 0.5)
        else:
            drizzle_sigma_eff = drizzle_sigma

        H, W = int(acc_shape[0]), int(acc_shape[1])
        outH = int(round(H * drizzle_scale))
        outW = int(round(W * drizzle_scale))

        # Identity transform from input pixels -> aligned/reference pixel coords
        # drizzle_factor applies the scale.
        T = np.zeros((2, 3), dtype=np.float32)
        T[0, 0] = 1.0
        T[1, 1] = 1.0

    # ---- Worker: accumulate its own sum OR its own drizzle buffers ----
    def _stack_chunk(chunk: list[int]):
        src, owns = _ensure_source(source_obj, cache_items=0)
        try:
            if drizzle_on:
                if len(acc_shape) == 2:
                    dbuf = np.zeros((outH, outW), dtype=np.float32)
                    cbuf = np.zeros((outH, outW), dtype=np.float32)
                else:
                    dbuf = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
                    cbuf = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
            else:
                acc = np.zeros(acc_shape, dtype=np.float32)
                wacc = 0.0

            for i in chunk:
                img = _get_frame(src, int(i), roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern).astype(np.float32, copy=False)

                # Global prior (from Analyze)
                gdx = float(analysis.dx[int(i)]) if (analysis.dx is not None) else 0.0
                gdy = float(analysis.dy[int(i)]) if (analysis.dy is not None) else 0.0

                # Global prior always first
                warped_g = _shift_image(img, gdx, gdy)

                if cv2 is None or (not local_warp):
                    warped = warped_g
                else:
                    cur_m_g = _to_mono01(warped_g).astype(np.float32, copy=False)

                    ap_rdx, ap_rdy, ap_resp = _ap_phase_shifts_per_ap(
                        ref_m, cur_m_g,
                        ap_centers=ap_centers_all,
                        ap_size=ap_size,
                        max_dim=max_dim,
                    )
                    ap_cf = np.clip(ap_resp.astype(np.float32, copy=False), 0.0, 1.0)

                    keep = _reject_ap_outliers(ap_rdx, ap_rdy, ap_cf, z=3.5)
                    if np.any(keep):
                        ap_centers = ap_centers_all[keep]
                        ap_dx_k = ap_rdx[keep]
                        ap_dy_k = ap_rdy[keep]
                        ap_cf_k = ap_cf[keep]

                        dx_field, dy_field = _dense_field_from_ap_shifts(
                            warped_g.shape[0], warped_g.shape[1],
                            ap_centers, ap_dx_k, ap_dy_k, ap_cf_k,
                            grid=32, power=2.0, conf_floor=0.15,
                            radius=float(ap_size) * 3.0,
                        )
                        warped = _warp_by_dense_field(warped_g, dx_field, dy_field)
                    else:
                        warped = warped_g

                if drizzle_on:
                    # deposit aligned frame into drizzle buffers
                    fw = 1.0  # frame_weight (could later use quality weights)
                    if warped.ndim == 2:
                        drizzle_deposit_numba_kernel_mono(
                            warped, T, dbuf, cbuf,
                            drizzle_factor=drizzle_scale,
                            drop_shrink=drizzle_pixfrac,
                            frame_weight=fw,
                            kernel_code=kernel_code,
                            gaussian_sigma_or_radius=drizzle_sigma_eff,
                        )
                    else:
                        drizzle_deposit_color_kernel(
                            warped, T, dbuf, cbuf,
                            drizzle_factor=drizzle_scale,
                            drop_shrink=drizzle_pixfrac,
                            frame_weight=fw,
                            kernel_code=kernel_code,
                            gaussian_sigma_or_radius=drizzle_sigma_eff,
                        )
                else:
                    acc += warped
                    wacc += 1.0

            _bump_progress(len(chunk), "Stack")

            if drizzle_on:
                return dbuf, cbuf
            return acc, wacc

        finally:
            if owns:
                try:
                    src.close()
                except Exception:
                    pass

    # ---- Parallel run + reduce ----
    if drizzle_on:
        # reduce drizzle buffers
        if len(acc_shape) == 2:
            dbuf_total = np.zeros((outH, outW), dtype=np.float32)
            cbuf_total = np.zeros((outH, outW), dtype=np.float32)
        else:
            dbuf_total = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
            cbuf_total = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)

        with ThreadPoolExecutor(max_workers=workers) as ex:
            futs = [ex.submit(_stack_chunk, c) for c in chunks if c]
            for fut in as_completed(futs):
                db, cb = fut.result()
                dbuf_total += db
                cbuf_total += cb

        # finalize
        if len(acc_shape) == 2:
            out = np.zeros((outH, outW), dtype=np.float32)
            finalize_drizzle_2d(dbuf_total, cbuf_total, out)
        else:
            out = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
            finalize_drizzle_3d(dbuf_total, cbuf_total, out)

        out = np.clip(out, 0.0, 1.0).astype(np.float32, copy=False)

    else:
        acc_total = np.zeros(acc_shape, dtype=np.float32)
        wacc_total = 0.0

        with ThreadPoolExecutor(max_workers=workers) as ex:
            futs = [ex.submit(_stack_chunk, c) for c in chunks if c]
            for fut in as_completed(futs):
                acc_c, w_c = fut.result()
                acc_total += acc_c
                wacc_total += float(w_c)

        out = np.clip(acc_total / max(1e-6, wacc_total), 0.0, 1.0).astype(np.float32, copy=False)

    diag = {
        "frames_total": int(n),
        "frames_kept": int(len(keep_idx)),
        "roi_used": roi,
        "track_mode": track_mode,
        "local_warp": bool(local_warp),
        "workers": int(workers),
        "chunk_size": int(chunk_size),
        "drizzle_scale": float(drizzle_scale),
        "drizzle_pixfrac": float(drizzle_pixfrac),
        "drizzle_kernel": str(drizzle_kernel),
        "drizzle_sigma": float(drizzle_sigma),
    }
    return out, diag
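# --- Editor's note (illustrative, not part of the shipped file) ---
# A minimal sketch of the two-step flow implied by the API above; the
# SERStackConfig field names and argument values are assumptions, not taken
# from this diff:
#
#   cfg = SERStackConfig(source="capture.ser", track_mode="planetary")   # fields assumed
#   res = analyze_ser(cfg, debayer=True, to_rgb=True, ref_mode="best_stack", ref_count=5)
#   img, diag = stack_ser("capture.ser", analysis=res, keep_percent=20.0,
#                         debayer=True, to_rgb=True, drizzle_scale=1.5)
#
# stack_ser() raises if analysis.ref_image / analysis.ap_centers are missing,
# which is why the Analyze pass has to run first.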

def _build_reference(
    src: PlanetaryFrameSource,
    *,
    order: np.ndarray,
    roi,
    debayer: bool,
    to_rgb: bool,
    ref_mode: str,
    ref_count: int,
    bayer_pattern=None,
) -> np.ndarray:
    """
    ref_mode:
      - "best_frame": return best single frame
      - "best_stack": return mean of best ref_count frames
    """
    best_idx = int(order[0])
    f0 = _get_frame(src, best_idx, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
    if ref_mode != "best_stack" or ref_count <= 1:
        return f0.astype(np.float32, copy=False)

    k = int(max(2, min(ref_count, len(order))))
    acc = np.zeros_like(f0, dtype=np.float32)
    for j in range(k):
        idx = int(order[j])
        fr = _get_frame(src, idx, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
        acc += fr.astype(np.float32, copy=False)
    ref = acc / float(k)
    return np.clip(ref, 0.0, 1.0).astype(np.float32, copy=False)

def _cfg_get_source(cfg) -> Any:
    """
    Back-compat: prefer cfg.source (new), else cfg.ser_path (old).
    cfg.source may be:
      - path string (ser/avi/mp4/etc)
      - list of image paths
      - PlanetaryFrameSource
    """
    src = getattr(cfg, "source", None)
    if src is not None and src != "":
        return src
    return getattr(cfg, "ser_path", None)

def analyze_ser(
    cfg: SERStackConfig,
    *,
    debayer: bool = True,
    to_rgb: bool = False,
    smooth_sigma: float = 1.5,      # kept for API compat
    thresh_pct: float = 92.0,       # kept for API compat
    ref_mode: str = "best_frame",   # "best_frame" or "best_stack"
    bayer_pattern: Optional[str] = None,
    ref_count: int = 5,
    max_dim: int = 512,
    progress_cb=None,
    workers: Optional[int] = None,
) -> AnalyzeResult:
    """
    Parallel analyze for *any* PlanetaryFrameSource (SER/AVI/MP4/images/sequence).
      - Pass 1: quality for every frame
      - Build reference:
          - planetary: best frame or best-N stack
          - surface: frame 0 (chronological anchor)
      - Autoplace APs (always)
      - Pass 2:
          - planetary: AP-based shift directly
          - surface:
              (A) coarse drift stabilization via ref-locked NCC+subpix (on a larger tracking ROI),
              (B) AP search+refine that follows coarse, with outlier rejection,
              (C) robust median -> final dx/dy/conf
    """

    source_obj = _cfg_get_source(cfg)
    bpat = bayer_pattern or _cfg_bayer_pattern(cfg)

    if not source_obj:
        raise ValueError("SERStackConfig.source/ser_path is empty")

    # ---- open source + meta (single open) ----
    src0, owns0 = _ensure_source(source_obj, cache_items=2)
    try:
        meta = src0.meta
        base_roi = cfg.roi
        if base_roi is not None:
            base_roi = _clamp_roi_in_bounds(base_roi, meta.width, meta.height)
        n = int(meta.frames)
        if n <= 0:
            raise ValueError("Source contains no frames")
        src_w = int(meta.width)
        src_h = int(meta.height)
    finally:
        if owns0:
            try:
                src0.close()
            except Exception:
                pass

    # ---- Worker count ----
    if workers is None:
        cpu = os.cpu_count() or 4
        workers = max(1, min(cpu, 48))

    if cv2 is not None:
        try:
            cv2.setNumThreads(1)
        except Exception:
            pass

    # ---- Surface tracking ROI (IMPORTANT for big drift) ----
    def _surface_tracking_roi() -> Optional[Tuple[int, int, int, int]]:
        if base_roi is None:
            return None  # full frame
        margin = int(getattr(cfg, "surface_track_margin", 256))
        x, y, w, h = [int(v) for v in base_roi]
        x0 = max(0, x - margin)
        y0 = max(0, y - margin)
        x1 = min(src_w, x + w + margin)
        y1 = min(src_h, y + h + margin)
        return _clamp_roi_in_bounds((x0, y0, x1 - x0, y1 - y0), src_w, src_h)

    roi_track = _surface_tracking_roi() if cfg.track_mode == "surface" else base_roi
    roi_used = base_roi  # APs and final ref are in this coordinate system

    # -------------------------------------------------------------------------
    # Pass 1: quality (use roi_used)
    # -------------------------------------------------------------------------
    quality = np.zeros((n,), dtype=np.float32)
    idxs = np.arange(n, dtype=np.int32)
    n_chunks = max(5, int(workers) * int(getattr(cfg, "progress_chunk_factor", 5)))
    n_chunks = max(1, min(int(n), n_chunks))
    chunks = np.array_split(idxs, n_chunks)

    if progress_cb:
        progress_cb(0, n, "Quality")

    def _q_chunk(chunk: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
        out_i: list[int] = []
        out_q: list[float] = []
        src, owns = _ensure_source(source_obj, cache_items=0)
        try:
            for i in chunk.tolist():
                img = _get_frame(
                    src, int(i),
                    roi=roi_used,
                    debayer=debayer,
                    to_float01=True,
                    force_rgb=bool(to_rgb),
                    bayer_pattern=bpat,
                )
                m = _downsample_mono01(img, max_dim=max_dim)

                if cv2 is not None:
                    lap = cv2.Laplacian(m, cv2.CV_32F, ksize=3)
                    q = float(np.mean(np.abs(lap)))
                else:
                    q = float(
                        np.abs(m[:, 1:] - m[:, :-1]).mean() +
                        np.abs(m[1:, :] - m[:-1, :]).mean()
                    )
                out_i.append(int(i))
                out_q.append(q)
        finally:
            if owns:
                try:
                    src.close()
                except Exception:
                    pass
        return np.asarray(out_i, np.int32), np.asarray(out_q, np.float32)

    done_ct = 0
    with ThreadPoolExecutor(max_workers=workers) as ex:
        futs = [ex.submit(_q_chunk, c) for c in chunks if c.size > 0]
        for fut in as_completed(futs):
            ii, qq = fut.result()
            quality[ii] = qq
            done_ct += int(ii.size)
            if progress_cb:
                progress_cb(done_ct, n, "Quality")

    order = np.argsort(-quality).astype(np.int32, copy=False)

    # -------------------------------------------------------------------------
    # Build reference
    # -------------------------------------------------------------------------
    ref_count = int(max(1, min(int(ref_count), n)))
    ref_mode = "best_stack" if ref_mode == "best_stack" else "best_frame"

    src_ref, owns_ref = _ensure_source(source_obj, cache_items=2)
    if progress_cb:
        progress_cb(0, n, f"Building reference ({ref_mode}, N={ref_count})…")
    try:
        if cfg.track_mode == "surface":
            # Surface ref must be frame 0 in roi_used coords
            ref_img = _get_frame(
                src_ref, 0,
                roi=roi_used, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb),
                bayer_pattern=bpat,
            ).astype(np.float32, copy=False)

            ref_mode = "first_frame"
            ref_count = 1
        else:
            ref_img = _build_reference(
                src_ref,
                order=order,
                roi=roi_used,
                debayer=debayer,
                to_rgb=to_rgb,
                ref_mode=ref_mode,
                ref_count=ref_count,
                bayer_pattern=bpat,  # ✅ add this
            ).astype(np.float32, copy=False)

    finally:
        if owns_ref:
            try:
                src_ref.close()
            except Exception:
                pass

    # -------------------------------------------------------------------------
    # Autoplace APs (always)
    # -------------------------------------------------------------------------
    if progress_cb:
        progress_cb(0, n, "Placing alignment points…")

    ap_size = int(getattr(cfg, "ap_size", 64) or 64)
    ap_centers = _autoplace_aps(
        ref_img,
        ap_size=ap_size,
        ap_spacing=int(getattr(cfg, "ap_spacing", 48)),
        ap_min_mean=float(getattr(cfg, "ap_min_mean", 0.03)),
    )

    # -------------------------------------------------------------------------
    # Pass 2: shifts/conf
    # -------------------------------------------------------------------------
    dx = np.zeros((n,), dtype=np.float32)
    dy = np.zeros((n,), dtype=np.float32)
    conf = np.ones((n,), dtype=np.float32)
    coarse_conf: Optional[np.ndarray] = None

    if cfg.track_mode == "off" or cv2 is None:
        return AnalyzeResult(
            frames_total=n,
            roi_used=roi_used,
            track_mode=cfg.track_mode,
            quality=quality,
            dx=dx,
            dy=dy,
            conf=conf,
            order=order,
            ref_mode=ref_mode,
            ref_count=ref_count,
            ref_image=ref_img,
            ap_centers=ap_centers,
            ap_size=ap_size,
            ap_multiscale=bool(getattr(cfg, "ap_multiscale", False)),
            coarse_conf=None,
        )

    ref_m_full = _to_mono01(ref_img).astype(np.float32, copy=False)
    use_multiscale = bool(getattr(cfg, "ap_multiscale", False))

    # ---- surface coarse drift (ref-locked) ----
    if cfg.track_mode == "surface":
        coarse_conf = np.zeros((n,), dtype=np.float32)
        if progress_cb:
            progress_cb(0, n, "Surface: coarse drift (ref-locked NCC+subpix)…")
|
|
1446
|
+
|
|
1447
|
+
dx_chain, dy_chain, cc_chain = _coarse_surface_ref_locked(
|
|
1448
|
+
source_obj,
|
|
1449
|
+
n=n,
|
|
1450
|
+
roi=roi_track,
|
|
1451
|
+
roi_used=roi_used, # ✅ NEW
|
|
1452
|
+
debayer=debayer,
|
|
1453
|
+
to_rgb=to_rgb,
|
|
1454
|
+
bayer_pattern=bpat,
|
|
1455
|
+
progress_cb=progress_cb,
|
|
1456
|
+
progress_every=25,
|
|
1457
|
+
down=2,
|
|
1458
|
+
template_size=256,
|
|
1459
|
+
search_radius=96,
|
|
1460
|
+
bandpass=True,
|
|
1461
|
+
workers=min(workers, 8), # coarse doesn’t need 48; 4–8 is usually ideal
|
|
1462
|
+
stride=16, # 8–32 typical
|
|
1463
|
+
)
|
|
1464
|
+
dx[:] = dx_chain
|
|
1465
|
+
dy[:] = dy_chain
|
|
1466
|
+
coarse_conf[:] = cc_chain
|
|
1467
|
+
|
|
1468
|
+
# ---- chunked refine ----
|
|
1469
|
+
idxs2 = np.arange(n, dtype=np.int32)
|
|
1470
|
+
|
|
1471
|
+
# More/smaller chunks => progress updates sooner (futures complete more frequently)
|
|
1472
|
+
chunk_factor = int(getattr(cfg, "progress_chunk_factor", 5)) # optional knob
|
|
1473
|
+
min_chunks = 5
|
|
1474
|
+
n_chunks2 = max(min_chunks, int(workers) * chunk_factor)
|
|
1475
|
+
n_chunks2 = max(1, min(int(n), n_chunks2))
|
|
1476
|
+
|
|
1477
|
+
chunks2 = np.array_split(idxs2, n_chunks2)
|
|
1478
|
+
|
|
1479
|
+
if progress_cb:
|
|
1480
|
+
progress_cb(0, n, "SSD Refine")
|
|
1481
|
+
|
|
1482
|
+
if cfg.track_mode == "surface":
|
|
1483
|
+
# FAST surface refine:
|
|
1484
|
+
# - use coarse dx/dy from ref-locked tracker
|
|
1485
|
+
# - apply coarse shift to current mono frame
|
|
1486
|
+
# - compute residual per-AP phase shifts (NO SEARCH)
|
|
1487
|
+
# - final dx/dy = coarse + median(residual)
|
|
1488
|
+
def _shift_chunk(chunk: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
|
|
1489
|
+
out_i: list[int] = []
|
|
1490
|
+
out_dx: list[float] = []
|
|
1491
|
+
out_dy: list[float] = []
|
|
1492
|
+
out_cf: list[float] = []
|
|
1493
|
+
|
|
1494
|
+
src, owns = _ensure_source(source_obj, cache_items=0)
|
|
1495
|
+
try:
|
|
1496
|
+
for i in chunk.tolist():
|
|
1497
|
+
img = _get_frame(
|
|
1498
|
+
src, int(i),
|
|
1499
|
+
roi=roi_used,
|
|
1500
|
+
debayer=debayer,
|
|
1501
|
+
to_float01=True,
|
|
1502
|
+
force_rgb=bool(to_rgb),
|
|
1503
|
+
bayer_pattern=bpat,
|
|
1504
|
+
)
|
|
1505
|
+
cur_m = _to_mono01(img).astype(np.float32, copy=False)
|
|
1506
|
+
|
|
1507
|
+
coarse_dx = float(dx[int(i)])
|
|
1508
|
+
coarse_dy = float(dy[int(i)])
|
|
1509
|
+
|
|
1510
|
+
# Apply coarse shift FIRST (so APs line up without any searching)
|
|
1511
|
+
cur_m_g = _shift_image(cur_m, coarse_dx, coarse_dy)
|
|
1512
|
+
|
|
1513
|
+
if use_multiscale:
|
|
1514
|
+
s2, s1, s05 = _scaled_ap_sizes(ap_size)
|
|
1515
|
+
|
|
1516
|
+
def _one_scale(s_ap: int):
|
|
1517
|
+
rdx, rdy, resp = _ap_phase_shifts_per_ap(
|
|
1518
|
+
ref_m_full, cur_m_g,
|
|
1519
|
+
ap_centers=ap_centers,
|
|
1520
|
+
ap_size=s_ap,
|
|
1521
|
+
max_dim=max_dim,
|
|
1522
|
+
)
|
|
1523
|
+
cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
|
|
1524
|
+
keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
|
|
1525
|
+
if not np.any(keep):
|
|
1526
|
+
return 0.0, 0.0, 0.25
|
|
1527
|
+
dx_r = float(np.median(rdx[keep]))
|
|
1528
|
+
dy_r = float(np.median(rdy[keep]))
|
|
1529
|
+
cf_r = float(np.median(cf[keep]))
|
|
1530
|
+
return dx_r, dy_r, cf_r
|
|
1531
|
+
|
|
1532
|
+
dx2, dy2, cf2 = _one_scale(s2)
|
|
1533
|
+
dx1, dy1, cf1 = _one_scale(s1)
|
|
1534
|
+
dx0, dy0, cf0 = _one_scale(s05)
|
|
1535
|
+
|
|
1536
|
+
w2 = max(1e-3, float(cf2)) * 1.25
|
|
1537
|
+
w1 = max(1e-3, float(cf1)) * 1.00
|
|
1538
|
+
w0 = max(1e-3, float(cf0)) * 0.85
|
|
1539
|
+
wsum = (w2 + w1 + w0)
|
|
1540
|
+
|
|
1541
|
+
dx_res = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
|
|
1542
|
+
dy_res = (w2 * dy2 + w1 * dy1 + w0 * dy0) / wsum
|
|
1543
|
+
cf_ap = float(np.clip((w2 * cf2 + w1 * cf1 + w0 * cf0) / wsum, 0.0, 1.0))
|
|
1544
|
+
else:
|
|
1545
|
+
rdx, rdy, resp = _ap_phase_shifts_per_ap(
|
|
1546
|
+
ref_m_full, cur_m_g,
|
|
1547
|
+
ap_centers=ap_centers,
|
|
1548
|
+
ap_size=ap_size,
|
|
1549
|
+
max_dim=max_dim,
|
|
1550
|
+
)
|
|
1551
|
+
cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
|
|
1552
|
+
keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
|
|
1553
|
+
if np.any(keep):
|
|
1554
|
+
dx_res = float(np.median(rdx[keep]))
|
|
1555
|
+
dy_res = float(np.median(rdy[keep]))
|
|
1556
|
+
cf_ap = float(np.median(cf[keep]))
|
|
1557
|
+
else:
|
|
1558
|
+
dx_res, dy_res, cf_ap = 0.0, 0.0, 0.25
|
|
1559
|
+
|
|
1560
|
+
# Final = coarse + residual (residual is relative to coarse-shifted frame)
|
|
1561
|
+
# Final = coarse + residual (residual is relative to coarse-shifted frame)
|
|
1562
|
+
dx_i = float(coarse_dx + dx_res)
|
|
1563
|
+
dy_i = float(coarse_dy + dy_res)
|
|
1564
|
+
|
|
1565
|
+
# Final lock-in refinement: minimize (ref-cur)^2 on gradients in a tiny window
|
|
1566
|
+
# NOTE: pass *unshifted* cur_m with the current dx_i/dy_i estimate
|
|
1567
|
+
dxr, dyr, c_ssd = _refine_shift_ssd(
|
|
1568
|
+
ref_m_full, cur_m, dx_i, dy_i,
|
|
1569
|
+
radius=5, crop=0.80,
|
|
1570
|
+
bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)),
|
|
1571
|
+
)
|
|
1572
|
+
dx_i += float(dxr)
|
|
1573
|
+
dy_i += float(dyr)
|
|
1574
|
+
|
|
1575
|
+
# Confidence: combine coarse + AP, then optionally nudge with SSD
|
|
1576
|
+
cc = float(coarse_conf[int(i)]) if coarse_conf is not None else 0.5
|
|
1577
|
+
cf_i = float(np.clip(0.60 * cc + 0.40 * float(cf_ap), 0.0, 1.0))
|
|
1578
|
+
cf_i = float(np.clip(0.85 * cf_i + 0.15 * float(c_ssd), 0.05, 1.0))
|
|
1579
|
+
|
|
1580
|
+
out_i.append(int(i))
|
|
1581
|
+
out_dx.append(dx_i)
|
|
1582
|
+
out_dy.append(dy_i)
|
|
1583
|
+
out_cf.append(cf_i)
|
|
1584
|
+
finally:
|
|
1585
|
+
if owns:
|
|
1586
|
+
try:
|
|
1587
|
+
src.close()
|
|
1588
|
+
except Exception:
|
|
1589
|
+
pass
|
|
1590
|
+
|
|
1591
|
+
return (
|
|
1592
|
+
np.asarray(out_i, np.int32),
|
|
1593
|
+
np.asarray(out_dx, np.float32),
|
|
1594
|
+
np.asarray(out_dy, np.float32),
|
|
1595
|
+
np.asarray(out_cf, np.float32),
|
|
1596
|
+
)
|
|
1597
|
+
|
|
1598
|
+
else:
|
|
1599
|
+
# planetary: centroid tracking (same as viewer) for GLOBAL dx/dy/conf
|
|
1600
|
+
# APs are still computed and used later by stack_ser for local_warp residuals.
|
|
1601
|
+
tracker = PlanetaryTracker(
|
|
1602
|
+
smooth_sigma=float(getattr(cfg, "planet_smooth_sigma", smooth_sigma)),
|
|
1603
|
+
thresh_pct=float(getattr(cfg, "planet_thresh_pct", thresh_pct)),
|
|
1604
|
+
min_val=float(getattr(cfg, "planet_min_val", 0.02)),
|
|
1605
|
+
use_norm=bool(getattr(cfg, "planet_use_norm", False)),
|
|
1606
|
+
norm_hi_pct=float(getattr(cfg, "planet_norm_hi_pct", 99.5)),
|
|
1607
|
+
)
|
|
1608
|
+
|
|
1609
|
+
# IMPORTANT: reference center is computed from the SAME reference image that Analyze chose
|
|
1610
|
+
ref_cx, ref_cy, ref_cc = tracker.compute_center(ref_img)
|
|
1611
|
+
if ref_cc <= 0.0:
|
|
1612
|
+
# fallback: center of ROI
|
|
1613
|
+
mref = _to_mono01(ref_img)
|
|
1614
|
+
ref_cx = float(mref.shape[1] * 0.5)
|
|
1615
|
+
ref_cy = float(mref.shape[0] * 0.5)
|
|
1616
|
+
|
|
1617
|
+
ref_center = (float(ref_cx), float(ref_cy))
|
|
1618
|
+
ref_m_full = _to_mono01(ref_img).astype(np.float32, copy=False)
|
|
1619
|
+
|
|
1620
|
+
def _shift_chunk(chunk: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
|
|
1621
|
+
out_i: list[int] = []
|
|
1622
|
+
out_dx: list[float] = []
|
|
1623
|
+
out_dy: list[float] = []
|
|
1624
|
+
out_cf: list[float] = []
|
|
1625
|
+
|
|
1626
|
+
src, owns = _ensure_source(source_obj, cache_items=0)
|
|
1627
|
+
try:
|
|
1628
|
+
for i in chunk.tolist():
|
|
1629
|
+
img = _get_frame(
|
|
1630
|
+
src, int(i),
|
|
1631
|
+
roi=roi_used,
|
|
1632
|
+
debayer=debayer,
|
|
1633
|
+
to_float01=True,
|
|
1634
|
+
force_rgb=bool(to_rgb),
|
|
1635
|
+
bayer_pattern=bpat,
|
|
1636
|
+
)
|
|
1637
|
+
|
|
1638
|
+
dx_i, dy_i, cf_i = tracker.shift_to_ref(img, ref_center)
|
|
1639
|
+
|
|
1640
|
+
if float(cf_i) >= 0.25:
|
|
1641
|
+
cur_m = _to_mono01(img).astype(np.float32, copy=False)
|
|
1642
|
+
dxr, dyr, c_ssd = _refine_shift_ssd(
|
|
1643
|
+
ref_m_full, cur_m, dx_i, dy_i,
|
|
1644
|
+
radius=5, crop=0.80,
|
|
1645
|
+
bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)),
|
|
1646
|
+
)
|
|
1647
|
+
|
|
1648
|
+
dx_i = float(dx_i) + dxr
|
|
1649
|
+
dy_i = float(dy_i) + dyr
|
|
1650
|
+
cf_i = float(np.clip(0.85 * float(cf_i) + 0.15 * c_ssd, 0.05, 1.0))
|
|
1651
|
+
out_i.append(int(i))
|
|
1652
|
+
out_dx.append(float(dx_i))
|
|
1653
|
+
out_dy.append(float(dy_i))
|
|
1654
|
+
out_cf.append(float(cf_i))
|
|
1655
|
+
finally:
|
|
1656
|
+
if owns:
|
|
1657
|
+
try:
|
|
1658
|
+
src.close()
|
|
1659
|
+
except Exception:
|
|
1660
|
+
pass
|
|
1661
|
+
|
|
1662
|
+
return (
|
|
1663
|
+
np.asarray(out_i, np.int32),
|
|
1664
|
+
np.asarray(out_dx, np.float32),
|
|
1665
|
+
np.asarray(out_dy, np.float32),
|
|
1666
|
+
np.asarray(out_cf, np.float32),
|
|
1667
|
+
)
|
|
1668
|
+
|
|
1669
|
+
|
|
1670
|
+
done_ct = 0
|
|
1671
|
+
with ThreadPoolExecutor(max_workers=workers) as ex:
|
|
1672
|
+
futs = [ex.submit(_shift_chunk, c) for c in chunks2 if c.size > 0]
|
|
1673
|
+
for fut in as_completed(futs):
|
|
1674
|
+
ii, ddx, ddy, ccf = fut.result()
|
|
1675
|
+
dx[ii] = ddx
|
|
1676
|
+
dy[ii] = ddy
|
|
1677
|
+
conf[ii] = np.clip(ccf, 0.05, 1.0).astype(np.float32, copy=False)
|
|
1678
|
+
|
|
1679
|
+
done_ct += int(ii.size)
|
|
1680
|
+
if progress_cb:
|
|
1681
|
+
progress_cb(done_ct, n, "SSD Refine")
|
|
1682
|
+
|
|
1683
|
+
if cfg.track_mode == "surface":
|
|
1684
|
+
_print_surface_debug(dx=dx, dy=dy, conf=conf, coarse_conf=coarse_conf, floor=0.05, prefix="[SER][Surface]")
|
|
1685
|
+
|
|
1686
|
+
return AnalyzeResult(
|
|
1687
|
+
frames_total=n,
|
|
1688
|
+
roi_used=roi_used,
|
|
1689
|
+
track_mode=cfg.track_mode,
|
|
1690
|
+
quality=quality,
|
|
1691
|
+
dx=dx,
|
|
1692
|
+
dy=dy,
|
|
1693
|
+
conf=conf,
|
|
1694
|
+
order=order,
|
|
1695
|
+
ref_mode=ref_mode,
|
|
1696
|
+
ref_count=ref_count,
|
|
1697
|
+
ref_image=ref_img,
|
|
1698
|
+
ap_centers=ap_centers,
|
|
1699
|
+
ap_size=ap_size,
|
|
1700
|
+
ap_multiscale=use_multiscale,
|
|
1701
|
+
coarse_conf=coarse_conf,
|
|
1702
|
+
)
|
|
1703
|
+
|
|
1704
|
+
|
|
1705
|
+
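# Illustrative sketch only (not part of the module): one way a caller could apply the
# per-frame solution from an AnalyzeResult. `_ensure_source`, `_get_frame`, `_to_mono01`
# and `_shift_image` are the helpers used above; the loop itself is an assumption about
# downstream usage, not the actual stacking code.
#
#   res = analyze_ser(cfg, ...)  # signature abbreviated here
#   src, owns = _ensure_source(_cfg_get_source(cfg), cache_items=0)
#   frame = _get_frame(src, i, roi=res.roi_used, debayer=True, to_float01=True,
#                      force_rgb=False, bayer_pattern=None)
#   aligned = _shift_image(_to_mono01(frame), float(res.dx[i]), float(res.dy[i]))
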
def realign_ser(
    cfg: SERStackConfig,
    analysis: AnalyzeResult,
    *,
    debayer: bool = True,
    to_rgb: bool = False,
    max_dim: int = 512,
    progress_cb=None,
    bayer_pattern: Optional[str] = None,
    workers: Optional[int] = None,
) -> AnalyzeResult:
    """
    Recompute dx/dy/conf only, using analysis.ref_image and analysis.ap_centers.
    Keeps quality/order/ref_image unchanged.

    Surface mode:
      - recompute coarse drift (ref-locked) on roi_track
      - refine via per-AP residuals following the coarse shift, with outlier rejection
    """
    bpat = bayer_pattern or _cfg_bayer_pattern(cfg)

    if analysis is None:
        raise ValueError("analysis is None")
    if analysis.ref_image is None:
        raise ValueError("analysis.ref_image is missing")

    source_obj = _cfg_get_source(cfg)
    if not source_obj:
        raise ValueError("SERStackConfig.source/ser_path is empty")

    n = int(analysis.frames_total)
    roi_used = analysis.roi_used
    ref_img = analysis.ref_image

    if cfg.track_mode == "off" or cv2 is None:
        analysis.dx = np.zeros((n,), dtype=np.float32)
        analysis.dy = np.zeros((n,), dtype=np.float32)
        analysis.conf = np.ones((n,), dtype=np.float32)
        if hasattr(analysis, "coarse_conf"):
            analysis.coarse_conf = None
        return analysis

    # Ensure AP centers exist
    ap_centers = getattr(analysis, "ap_centers", None)
    if ap_centers is None or np.asarray(ap_centers).size == 0:
        ap_centers = _autoplace_aps(
            ref_img,
            ap_size=int(getattr(cfg, "ap_size", 64)),
            ap_spacing=int(getattr(cfg, "ap_spacing", 48)),
            ap_min_mean=float(getattr(cfg, "ap_min_mean", 0.03)),
        )
        analysis.ap_centers = ap_centers

    if workers is None:
        cpu = os.cpu_count() or 4
        workers = max(1, min(cpu, 48))

    if cv2 is not None:
        try:
            cv2.setNumThreads(1)
        except Exception:
            pass

    # Need meta for ROI expansion (surface tracking)
    src0, owns0 = _ensure_source(source_obj, cache_items=2)
    try:
        meta = src0.meta
        src_w = int(meta.width)
        src_h = int(meta.height)
    finally:
        if owns0:
            try:
                src0.close()
            except Exception:
                pass

    def _surface_tracking_roi() -> Optional[Tuple[int, int, int, int]]:
        if roi_used is None:
            return None
        margin = int(getattr(cfg, "surface_track_margin", 256))
        x, y, w, h = [int(v) for v in roi_used]
        x0 = max(0, x - margin)
        y0 = max(0, y - margin)
        x1 = min(src_w, x + w + margin)
        y1 = min(src_h, y + h + margin)
        return _clamp_roi_in_bounds((x0, y0, x1 - x0, y1 - y0), src_w, src_h)

    roi_track = _surface_tracking_roi() if cfg.track_mode == "surface" else roi_used

    # ---- chunked refine ----
    idxs2 = np.arange(n, dtype=np.int32)

    # More/smaller chunks => progress updates sooner (futures complete more frequently)
    chunk_factor = int(getattr(cfg, "progress_chunk_factor", 5))  # optional knob
    min_chunks = 5
    n_chunks2 = max(min_chunks, int(workers) * chunk_factor)
    n_chunks2 = max(1, min(int(n), n_chunks2))

    chunks2 = np.array_split(idxs2, n_chunks2)

    dx = np.zeros((n,), dtype=np.float32)
    dy = np.zeros((n,), dtype=np.float32)
    conf = np.ones((n,), dtype=np.float32)

    ref_m = _to_mono01(ref_img).astype(np.float32, copy=False)

    ap_size = int(getattr(cfg, "ap_size", 64) or 64)
    use_multiscale = bool(getattr(cfg, "ap_multiscale", False))

    coarse_conf: Optional[np.ndarray] = None
    if cfg.track_mode == "surface":
        coarse_conf = np.zeros((n,), dtype=np.float32)
        if progress_cb:
            progress_cb(0, n, "Surface: coarse drift (ref-locked NCC+subpix)…")

        dx_chain, dy_chain, cc_chain = _coarse_surface_ref_locked(
            source_obj,
            n=n,
            roi=roi_track,
            roi_used=roi_used,  # ✅ NEW
            debayer=debayer,
            to_rgb=to_rgb,
            bayer_pattern=bpat,
            progress_cb=progress_cb,
            progress_every=25,
            down=2,
            template_size=256,
            search_radius=96,
            bandpass=True,
            workers=min(workers, 8),  # coarse doesn't need 48; 4–8 is usually ideal
            stride=16,                # 8–32 typical
        )

        dx[:] = dx_chain
        dy[:] = dy_chain
        coarse_conf[:] = cc_chain

    if progress_cb:
        progress_cb(0, n, "SSD Refine")

    if cfg.track_mode == "surface":
        def _shift_chunk(chunk: np.ndarray):
            out_i: list[int] = []
            out_dx: list[float] = []
            out_dy: list[float] = []
            out_cf: list[float] = []
            out_cc: list[float] = []

            src, owns = _ensure_source(source_obj, cache_items=0)
            try:
                for i in chunk.tolist():
                    img = _get_frame(
                        src, int(i),
                        roi=roi_used,
                        debayer=debayer,
                        to_float01=True,
                        force_rgb=bool(to_rgb),
                        bayer_pattern=bpat,
                    )
                    cur_m = _to_mono01(img).astype(np.float32, copy=False)

                    coarse_dx = float(dx[int(i)])
                    coarse_dy = float(dy[int(i)])
                    cc = float(coarse_conf[int(i)]) if coarse_conf is not None else 0.5

                    # Apply coarse shift first
                    cur_m_g = _shift_image(cur_m, coarse_dx, coarse_dy)

                    if use_multiscale:
                        s2, s1, s05 = _scaled_ap_sizes(ap_size)

                        def _one_scale(s_ap: int):
                            rdx, rdy, resp = _ap_phase_shifts_per_ap(
                                ref_m, cur_m_g,
                                ap_centers=ap_centers,
                                ap_size=s_ap,
                                max_dim=max_dim,
                            )
                            cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
                            keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
                            if not np.any(keep):
                                return 0.0, 0.0, 0.25
                            return (
                                float(np.median(rdx[keep])),
                                float(np.median(rdy[keep])),
                                float(np.median(cf[keep])),
                            )

                        dx2, dy2, cf2 = _one_scale(s2)
                        dx1, dy1, cf1 = _one_scale(s1)
                        dx0, dy0, cf0 = _one_scale(s05)

                        w2 = max(1e-3, float(cf2)) * 1.25
                        w1 = max(1e-3, float(cf1)) * 1.00
                        w0 = max(1e-3, float(cf0)) * 0.85
                        wsum = (w2 + w1 + w0)

                        dx_res = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
                        dy_res = (w2 * dy2 + w1 * dy1 + w0 * dy0) / wsum
                        cf_ap = float(np.clip((w2 * cf2 + w1 * cf1 + w0 * cf0) / wsum, 0.0, 1.0))
                    else:
                        rdx, rdy, resp = _ap_phase_shifts_per_ap(
                            ref_m, cur_m_g,
                            ap_centers=ap_centers,
                            ap_size=ap_size,
                            max_dim=max_dim,
                        )
                        cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
                        keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
                        if np.any(keep):
                            dx_res = float(np.median(rdx[keep]))
                            dy_res = float(np.median(rdy[keep]))
                            cf_ap = float(np.median(cf[keep]))
                        else:
                            dx_res, dy_res, cf_ap = 0.0, 0.0, 0.25

                    # Final = coarse + residual (residual is relative to the coarse-shifted frame)
                    dx_i = float(coarse_dx + dx_res)
                    dy_i = float(coarse_dy + dy_res)

                    # Final lock-in refinement: minimize (ref - cur)^2 on gradients in a tiny window
                    # NOTE: pass the *unshifted* cur_m with the current dx_i/dy_i estimate
                    dxr, dyr, c_ssd = _refine_shift_ssd(
                        ref_m, cur_m, dx_i, dy_i,
                        radius=5, crop=0.80,
                        bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)),
                    )
                    dx_i += float(dxr)
                    dy_i += float(dyr)

                    # Confidence: combine coarse + AP, then nudge with SSD
                    cf_i = float(np.clip(0.60 * cc + 0.40 * float(cf_ap), 0.0, 1.0))
                    cf_i = float(np.clip(0.85 * cf_i + 0.15 * float(c_ssd), 0.05, 1.0))

                    out_i.append(int(i))
                    out_dx.append(dx_i)
                    out_dy.append(dy_i)
                    out_cf.append(cf_i)
                    out_cc.append(float(cc))
            finally:
                if owns:
                    try:
                        src.close()
                    except Exception:
                        pass

            return (
                np.asarray(out_i, np.int32),
                np.asarray(out_dx, np.float32),
                np.asarray(out_dy, np.float32),
                np.asarray(out_cf, np.float32),
                np.asarray(out_cc, np.float32),
            )

    else:
        # planetary: centroid tracking (same as the viewer)
        tracker = PlanetaryTracker(
            smooth_sigma=float(getattr(cfg, "planet_smooth_sigma", smooth_sigma)),
            thresh_pct=float(getattr(cfg, "planet_thresh_pct", thresh_pct)),
            min_val=float(getattr(cfg, "planet_min_val", 0.02)),
            use_norm=bool(getattr(cfg, "planet_use_norm", False)),
            norm_hi_pct=float(getattr(cfg, "planet_norm_hi_pct", 99.5)),
        )

        # Reference center comes from analysis.ref_image (same anchor as analyze_ser)
        ref_cx, ref_cy, ref_cc = tracker.compute_center(ref_img)
        if ref_cc <= 0.0:
            mref = _to_mono01(ref_img)
            ref_cx = float(mref.shape[1] * 0.5)
            ref_cy = float(mref.shape[0] * 0.5)

        ref_center = (float(ref_cx), float(ref_cy))
        ref_m_full = _to_mono01(ref_img).astype(np.float32, copy=False)

        def _shift_chunk(chunk: np.ndarray):
            out_i: list[int] = []
            out_dx: list[float] = []
            out_dy: list[float] = []
            out_cf: list[float] = []

            src, owns = _ensure_source(source_obj, cache_items=0)
            try:
                for i in chunk.tolist():
                    img = _get_frame(
                        src, int(i),
                        roi=roi_used,
                        debayer=debayer,
                        to_float01=True,
                        force_rgb=bool(to_rgb),
                        bayer_pattern=bpat,
                    )

                    dx_i, dy_i, cf_i = tracker.shift_to_ref(img, ref_center)

                    if float(cf_i) >= 0.25:
                        cur_m = _to_mono01(img).astype(np.float32, copy=False)
                        dxr, dyr, c_ssd = _refine_shift_ssd(
                            ref_m_full, cur_m, float(dx_i), float(dy_i),
                            radius=2, crop=0.80,
                            bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)),
                        )
                        dx_i = float(dx_i) + dxr
                        dy_i = float(dy_i) + dyr
                        cf_i = float(np.clip(0.85 * float(cf_i) + 0.15 * c_ssd, 0.05, 1.0))

                    out_i.append(int(i))
                    out_dx.append(float(dx_i))
                    out_dy.append(float(dy_i))
                    out_cf.append(float(cf_i))
            finally:
                if owns:
                    try:
                        src.close()
                    except Exception:
                        pass

            return (
                np.asarray(out_i, np.int32),
                np.asarray(out_dx, np.float32),
                np.asarray(out_dy, np.float32),
                np.asarray(out_cf, np.float32),
            )

    done_ct = 0
    with ThreadPoolExecutor(max_workers=workers) as ex:
        futs = [ex.submit(_shift_chunk, c) for c in chunks2 if c.size > 0]
        for fut in as_completed(futs):
            if cfg.track_mode == "surface":
                ii, ddx, ddy, ccf, ccc = fut.result()
                if coarse_conf is not None:
                    coarse_conf[ii] = ccc
            else:
                ii, ddx, ddy, ccf = fut.result()

            dx[ii] = ddx
            dy[ii] = ddy
            conf[ii] = np.clip(ccf, 0.05, 1.0).astype(np.float32, copy=False)

            done_ct += int(ii.size)
            if progress_cb:
                progress_cb(done_ct, n, "SSD Refine")

    analysis.dx = dx
    analysis.dy = dy
    analysis.conf = conf
    if hasattr(analysis, "coarse_conf"):
        analysis.coarse_conf = coarse_conf

    if cfg.track_mode == "surface":
        _print_surface_debug(dx=dx, dy=dy, conf=conf, coarse_conf=coarse_conf, floor=0.05, prefix="[SER][Surface][realign]")

    return analysis

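# Note (editorial, not from the source): realign_ser deliberately reuses analysis.ref_image
# and analysis.ap_centers, so a caller can change tracking knobs (e.g. cfg.ap_multiscale or
# cfg.surface_track_margin) and call
#     analysis = realign_ser(cfg, analysis, progress_cb=cb)
# to refresh dx/dy/conf without re-scoring frame quality or rebuilding the reference.
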
def _autoplace_aps(ref_img01: np.ndarray, ap_size: int, ap_spacing: int, ap_min_mean: float) -> np.ndarray:
    """
    Return AP centers as an int32 array of shape (M, 2) with columns (cx, cy) in ROI coords.
    We grid-scan by spacing and keep patches whose mean brightness exceeds ap_min_mean.
    """
    m = _to_mono01(ref_img01).astype(np.float32, copy=False)
    H, W = m.shape[:2]
    s = int(max(16, ap_size))
    step = int(max(4, ap_spacing))

    half = s // 2
    xs = list(range(half, max(half + 1, W - half), step))
    ys = list(range(half, max(half + 1, H - half), step))

    pts = []
    for cy in ys:
        y0 = cy - half
        y1 = y0 + s
        if y0 < 0 or y1 > H:
            continue
        for cx in xs:
            x0 = cx - half
            x1 = x0 + s
            if x0 < 0 or x1 > W:
                continue
            patch = m[y0:y1, x0:x1]
            if float(patch.mean()) >= float(ap_min_mean):
                pts.append((cx, cy))

    if not pts:
        # absolute fallback: a single center point (behaves like single-point tracking)
        pts = [(W // 2, H // 2)]

    return np.asarray(pts, dtype=np.int32)

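# Worked example of the grid above (implied by the defaults, not additional API):
# for a 512x512 ROI with ap_size=64 and ap_spacing=48, half=32 and the candidate centers
# are range(32, 480, 48) = 32, 80, ..., 464 on each axis, i.e. up to 10 x 10 = 100 APs
# before the ap_min_mean brightness filter is applied.
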
def _scaled_ap_sizes(base: int) -> tuple[int, int, int]:
    b = int(base)
    s2 = int(round(b * 2.0))
    s1 = int(round(b * 1.0))
    s05 = int(round(b * 0.5))
    # clamp to sane limits
    s2 = max(16, min(256, s2))
    s1 = max(16, min(256, s1))
    s05 = max(16, min(256, s05))
    return s2, s1, s05

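# Example: _scaled_ap_sizes(64) -> (128, 64, 32); _scaled_ap_sizes(160) -> (256, 160, 80),
# since each size is clamped to the [16, 256] range.
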
def _dense_field_from_ap_shifts(
    H: int, W: int,
    ap_centers: np.ndarray,   # (M, 2)
    ap_dx: np.ndarray,        # (M,)
    ap_dy: np.ndarray,        # (M,)
    ap_cf: np.ndarray,        # (M,)
    *,
    grid: int = 32,           # coarse grid resolution (32 or 48 are good)
    power: float = 2.0,
    conf_floor: float = 0.15,
    radius: float | None = None,  # optional clamp in pixels (ROI coords)
) -> tuple[np.ndarray, np.ndarray]:
    """
    Returns dense (dx_field, dy_field) as float32 arrays (H, W) in ROI pixels.
    Computed on a coarse grid, then upsampled.
    """
    # coarse grid points
    gh = max(4, int(grid))
    gw = max(4, int(round(grid * (W / max(1, H)))))

    ys = np.linspace(0, H - 1, gh, dtype=np.float32)
    xs = np.linspace(0, W - 1, gw, dtype=np.float32)
    gx, gy = np.meshgrid(xs, ys)  # (gh, gw)

    pts = ap_centers.astype(np.float32)
    px = pts[:, 0].reshape(-1, 1, 1)  # (M, 1, 1)
    py = pts[:, 1].reshape(-1, 1, 1)  # (M, 1, 1)

    cf = np.maximum(ap_cf.astype(np.float32), 0.0)
    good = cf >= float(conf_floor)

    if not np.any(good):
        dxg = np.zeros((gh, gw), np.float32)
        dyg = np.zeros((gh, gw), np.float32)
    else:
        px = px[good]
        py = py[good]
        dx = ap_dx[good].astype(np.float32).reshape(-1, 1, 1)
        dy = ap_dy[good].astype(np.float32).reshape(-1, 1, 1)
        cw = cf[good].astype(np.float32).reshape(-1, 1, 1)

        dxp = px - gx[None, :, :]   # (M, gh, gw)
        dyp = py - gy[None, :, :]   # (M, gh, gw)
        d2 = dxp * dxp + dyp * dyp  # (M, gh, gw)

        if radius is not None:
            r2 = float(radius) * float(radius)
            far = d2 > r2
        else:
            far = None

        w = 1.0 / np.maximum(d2, 1.0) ** (power * 0.5)
        w *= cw

        if far is not None:
            w = np.where(far, 0.0, w)

        wsum = np.sum(w, axis=0)  # (gh, gw)

        dxg = np.sum(w * dx, axis=0) / np.maximum(wsum, 1e-6)
        dyg = np.sum(w * dy, axis=0) / np.maximum(wsum, 1e-6)

    # upsample to full res
    dx_field = cv2.resize(dxg, (W, H), interpolation=cv2.INTER_CUBIC).astype(np.float32, copy=False)
    dy_field = cv2.resize(dyg, (W, H), interpolation=cv2.INTER_CUBIC).astype(np.float32, copy=False)
    return dx_field, dy_field

def _warp_by_dense_field(img01: np.ndarray, dx_field: np.ndarray, dy_field: np.ndarray) -> np.ndarray:
    """
    img01 is (H, W) or (H, W, 3).
    dx_field/dy_field are (H, W) in pixels: shifting cur by (dx, dy) aligns it to ref.
    """
    H, W = dx_field.shape
    # cv2.remap wants map_x/map_y = source sampling coordinates.
    # To get output aligned-to-ref, we sample cur at (x - dx, y - dy).
    xs, ys = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32))
    map_x = xs - dx_field
    map_y = ys - dy_field

    # cv2.remap handles mono (H, W) and color (H, W, 3) inputs identically here.
    return cv2.remap(img01, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)

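# Illustrative pairing of the helpers above (a sketch, not code from the module):
# per-AP residuals measured by _ap_phase_shifts_per_ap can be spread into a smooth
# per-pixel warp and applied to a frame that has already been globally shifted:
#
#   ap_dx, ap_dy, ap_cf = _ap_phase_shifts_per_ap(ref_m, cur_m_g, ap_centers=aps,
#                                                 ap_size=64, max_dim=512)
#   fx, fy = _dense_field_from_ap_shifts(ref_m.shape[0], ref_m.shape[1], aps,
#                                        ap_dx, ap_dy, ap_cf, grid=32)
#   warped = _warp_by_dense_field(cur_m_g, fx, fy)
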
def _ap_phase_shift(
    ref_m: np.ndarray,
    cur_m: np.ndarray,
    ap_centers: np.ndarray,
    ap_size: int,
    max_dim: int,
) -> tuple[float, float, float]:
    """
    Compute a robust global shift from multiple local AP shifts.
    Returns (dx, dy, conf) in ROI pixel units.
    conf is the median of the per-AP phase-correlation responses.
    """
    s = int(max(16, ap_size))
    half = s // 2

    H, W = ref_m.shape[:2]
    dxs = []
    dys = []
    resps = []

    # downsample reference patches once per AP (fast enough as-is; M is usually modest)
    for (cx, cy) in ap_centers.tolist():
        x0 = cx - half
        y0 = cy - half
        x1 = x0 + s
        y1 = y0 + s
        if x0 < 0 or y0 < 0 or x1 > W or y1 > H:
            continue

        ref_patch = ref_m[y0:y1, x0:x1]
        cur_patch = cur_m[y0:y1, x0:x1]

        rp = _downsample_mono01(ref_patch, max_dim=max_dim)
        cp = _downsample_mono01(cur_patch, max_dim=max_dim)

        if rp.shape != cp.shape:
            cp = cv2.resize(cp, (rp.shape[1], rp.shape[0]), interpolation=cv2.INTER_AREA)

        sdx, sdy, resp = _phase_corr_shift(rp, cp)

        # scale back to ROI pixels (patch pixels -> ROI pixels)
        sx = float(s) / float(rp.shape[1])
        sy = float(s) / float(rp.shape[0])

        dxs.append(float(sdx * sx))
        dys.append(float(sdy * sy))
        resps.append(float(resp))

    if not dxs:
        return 0.0, 0.0, 0.5

    dx_med = float(np.median(np.asarray(dxs, np.float32)))
    dy_med = float(np.median(np.asarray(dys, np.float32)))
    conf = float(np.median(np.asarray(resps, np.float32)))

    return dx_med, dy_med, conf

def _ap_phase_shifts_per_ap(
    ref_m: np.ndarray,
    cur_m: np.ndarray,
    ap_centers: np.ndarray,
    ap_size: int,
    max_dim: int,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Per-AP phase-correlation shifts (NO SEARCH).
    Returns arrays (ap_dx, ap_dy, ap_resp) in ROI pixels, where shifting cur by (dx, dy)
    aligns it to ref for each AP.
    """
    s = int(max(16, ap_size))
    half = s // 2

    H, W = ref_m.shape[:2]
    M = int(ap_centers.shape[0])

    ap_dx = np.zeros((M,), np.float32)
    ap_dy = np.zeros((M,), np.float32)
    ap_resp = np.zeros((M,), np.float32)

    if cv2 is None or M == 0:
        ap_resp[:] = 0.5
        return ap_dx, ap_dy, ap_resp

    for j, (cx, cy) in enumerate(ap_centers.tolist()):
        x0 = int(cx - half)
        y0 = int(cy - half)
        x1 = x0 + s
        y1 = y0 + s
        if x0 < 0 or y0 < 0 or x1 > W or y1 > H:
            ap_resp[j] = 0.0
            continue

        ref_patch = ref_m[y0:y1, x0:x1]
        cur_patch = cur_m[y0:y1, x0:x1]

        rp = _downsample_mono01(ref_patch, max_dim=max_dim)
        cp = _downsample_mono01(cur_patch, max_dim=max_dim)

        if rp.shape != cp.shape and cv2 is not None:
            cp = cv2.resize(cp, (rp.shape[1], rp.shape[0]), interpolation=cv2.INTER_AREA)

        sdx, sdy, resp = _phase_corr_shift(rp, cp)

        # scale to ROI pixels
        sx = float(s) / float(rp.shape[1])
        sy = float(s) / float(rp.shape[0])

        ap_dx[j] = float(sdx * sx)
        ap_dy[j] = float(sdy * sy)
        ap_resp[j] = float(resp)

    return ap_dx, ap_dy, ap_resp

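# The multi-scale combiner below weights each scale by its confidence, with a mild bias
# toward the coarser (more stable) patch size. Numeric illustration (editorial, not from
# the source): with cf2=0.9, cf1=0.6, cf0=0.3 the weights are 1.125, 0.60 and 0.255, so
# the blended confidence is (1.125*0.9 + 0.60*0.6 + 0.255*0.3) / 1.98 ≈ 0.73.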
def _ap_phase_shift_multiscale(
    ref_m: np.ndarray,
    cur_m: np.ndarray,
    ap_centers: np.ndarray,
    base_ap_size: int,
    max_dim: int,
) -> tuple[float, float, float]:
    """
    Multi-scale AP shift:
      - compute shifts at 2x, 1x, and 0.5x AP sizes using the same centers
      - combine using confidence weights (favoring coarser scales slightly)
    Returns (dx, dy, conf) in ROI pixels.
    """
    s2, s1, s05 = _scaled_ap_sizes(base_ap_size)

    dx2, dy2, cf2 = _ap_phase_shift(ref_m, cur_m, ap_centers, s2, max_dim)
    dx1, dy1, cf1 = _ap_phase_shift(ref_m, cur_m, ap_centers, s1, max_dim)
    dx0, dy0, cf0 = _ap_phase_shift(ref_m, cur_m, ap_centers, s05, max_dim)

    # weights: confidence * slight preference for the larger scale (stability)
    w2 = max(1e-3, float(cf2)) * 1.25
    w1 = max(1e-3, float(cf1)) * 1.00
    w0 = max(1e-3, float(cf0)) * 0.85

    wsum = (w2 + w1 + w0)
    dx = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
    dy = (w2 * dy2 + w1 * dy1 + w0 * dy0) / wsum
    conf = float(np.clip((w2 * cf2 + w1 * cf1 + w0 * cf0) / wsum, 0.0, 1.0))

    return float(dx), float(dy), float(conf)