setiastrosuitepro 1.6.4__py3-none-any.whl → 1.7.1.post2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of setiastrosuitepro might be problematic.

Files changed (132)
  1. setiastro/images/TextureClarity.svg +56 -0
  2. setiastro/images/abeicon.svg +16 -0
  3. setiastro/images/acv_icon.png +0 -0
  4. setiastro/images/colorwheel.svg +97 -0
  5. setiastro/images/cosmic.svg +40 -0
  6. setiastro/images/cosmicsat.svg +24 -0
  7. setiastro/images/first_quarter.png +0 -0
  8. setiastro/images/full_moon.png +0 -0
  9. setiastro/images/graxpert.svg +19 -0
  10. setiastro/images/last_quarter.png +0 -0
  11. setiastro/images/linearfit.svg +32 -0
  12. setiastro/images/narrowbandnormalization.png +0 -0
  13. setiastro/images/new_moon.png +0 -0
  14. setiastro/images/pixelmath.svg +42 -0
  15. setiastro/images/planetarystacker.png +0 -0
  16. setiastro/images/waning_crescent_1.png +0 -0
  17. setiastro/images/waning_crescent_2.png +0 -0
  18. setiastro/images/waning_crescent_3.png +0 -0
  19. setiastro/images/waning_crescent_4.png +0 -0
  20. setiastro/images/waning_crescent_5.png +0 -0
  21. setiastro/images/waning_gibbous_1.png +0 -0
  22. setiastro/images/waning_gibbous_2.png +0 -0
  23. setiastro/images/waning_gibbous_3.png +0 -0
  24. setiastro/images/waning_gibbous_4.png +0 -0
  25. setiastro/images/waning_gibbous_5.png +0 -0
  26. setiastro/images/waxing_crescent_1.png +0 -0
  27. setiastro/images/waxing_crescent_2.png +0 -0
  28. setiastro/images/waxing_crescent_3.png +0 -0
  29. setiastro/images/waxing_crescent_4.png +0 -0
  30. setiastro/images/waxing_crescent_5.png +0 -0
  31. setiastro/images/waxing_gibbous_1.png +0 -0
  32. setiastro/images/waxing_gibbous_2.png +0 -0
  33. setiastro/images/waxing_gibbous_3.png +0 -0
  34. setiastro/images/waxing_gibbous_4.png +0 -0
  35. setiastro/images/waxing_gibbous_5.png +0 -0
  36. setiastro/qml/ResourceMonitor.qml +84 -82
  37. setiastro/saspro/__main__.py +20 -1
  38. setiastro/saspro/_generated/build_info.py +2 -2
  39. setiastro/saspro/abe.py +37 -4
  40. setiastro/saspro/aberration_ai.py +364 -33
  41. setiastro/saspro/aberration_ai_preset.py +29 -3
  42. setiastro/saspro/acv_exporter.py +379 -0
  43. setiastro/saspro/add_stars.py +33 -6
  44. setiastro/saspro/astrospike_python.py +45 -3
  45. setiastro/saspro/backgroundneutral.py +108 -40
  46. setiastro/saspro/blemish_blaster.py +4 -1
  47. setiastro/saspro/blink_comparator_pro.py +150 -55
  48. setiastro/saspro/clahe.py +4 -1
  49. setiastro/saspro/continuum_subtract.py +4 -1
  50. setiastro/saspro/convo.py +13 -7
  51. setiastro/saspro/cosmicclarity.py +129 -18
  52. setiastro/saspro/crop_dialog_pro.py +123 -7
  53. setiastro/saspro/curve_editor_pro.py +181 -64
  54. setiastro/saspro/curves_preset.py +249 -47
  55. setiastro/saspro/doc_manager.py +245 -15
  56. setiastro/saspro/exoplanet_detector.py +120 -28
  57. setiastro/saspro/frequency_separation.py +1158 -204
  58. setiastro/saspro/ghs_dialog_pro.py +81 -16
  59. setiastro/saspro/graxpert.py +1 -0
  60. setiastro/saspro/gui/main_window.py +706 -264
  61. setiastro/saspro/gui/mixins/dock_mixin.py +245 -24
  62. setiastro/saspro/gui/mixins/file_mixin.py +35 -16
  63. setiastro/saspro/gui/mixins/menu_mixin.py +35 -1
  64. setiastro/saspro/gui/mixins/theme_mixin.py +160 -14
  65. setiastro/saspro/gui/mixins/toolbar_mixin.py +499 -24
  66. setiastro/saspro/gui/mixins/update_mixin.py +138 -36
  67. setiastro/saspro/gui/mixins/view_mixin.py +42 -0
  68. setiastro/saspro/halobgon.py +4 -0
  69. setiastro/saspro/histogram.py +184 -8
  70. setiastro/saspro/image_combine.py +4 -0
  71. setiastro/saspro/image_peeker_pro.py +4 -0
  72. setiastro/saspro/imageops/narrowband_normalization.py +816 -0
  73. setiastro/saspro/imageops/serloader.py +1345 -0
  74. setiastro/saspro/imageops/starbasedwhitebalance.py +23 -52
  75. setiastro/saspro/imageops/stretch.py +582 -62
  76. setiastro/saspro/isophote.py +4 -0
  77. setiastro/saspro/layers.py +13 -9
  78. setiastro/saspro/layers_dock.py +183 -3
  79. setiastro/saspro/legacy/image_manager.py +154 -20
  80. setiastro/saspro/legacy/numba_utils.py +68 -48
  81. setiastro/saspro/legacy/xisf.py +240 -98
  82. setiastro/saspro/live_stacking.py +203 -82
  83. setiastro/saspro/luminancerecombine.py +228 -27
  84. setiastro/saspro/mask_creation.py +174 -15
  85. setiastro/saspro/mfdeconv.py +113 -35
  86. setiastro/saspro/mfdeconvcudnn.py +119 -70
  87. setiastro/saspro/mfdeconvsport.py +112 -35
  88. setiastro/saspro/morphology.py +4 -0
  89. setiastro/saspro/multiscale_decomp.py +81 -29
  90. setiastro/saspro/narrowband_normalization.py +1618 -0
  91. setiastro/saspro/numba_utils.py +72 -57
  92. setiastro/saspro/ops/commands.py +18 -18
  93. setiastro/saspro/ops/script_editor.py +10 -2
  94. setiastro/saspro/ops/scripts.py +122 -0
  95. setiastro/saspro/perfect_palette_picker.py +37 -3
  96. setiastro/saspro/plate_solver.py +84 -49
  97. setiastro/saspro/psf_viewer.py +119 -37
  98. setiastro/saspro/remove_green.py +1 -1
  99. setiastro/saspro/resources.py +73 -0
  100. setiastro/saspro/rgbalign.py +460 -12
  101. setiastro/saspro/selective_color.py +4 -1
  102. setiastro/saspro/ser_stack_config.py +82 -0
  103. setiastro/saspro/ser_stacker.py +2321 -0
  104. setiastro/saspro/ser_stacker_dialog.py +1838 -0
  105. setiastro/saspro/ser_tracking.py +206 -0
  106. setiastro/saspro/serviewer.py +1625 -0
  107. setiastro/saspro/sfcc.py +662 -216
  108. setiastro/saspro/shortcuts.py +171 -33
  109. setiastro/saspro/signature_insert.py +692 -33
  110. setiastro/saspro/stacking_suite.py +1347 -485
  111. setiastro/saspro/star_alignment.py +247 -123
  112. setiastro/saspro/star_spikes.py +4 -0
  113. setiastro/saspro/star_stretch.py +38 -3
  114. setiastro/saspro/stat_stretch.py +892 -129
  115. setiastro/saspro/subwindow.py +787 -363
  116. setiastro/saspro/supernovaasteroidhunter.py +1 -1
  117. setiastro/saspro/texture_clarity.py +593 -0
  118. setiastro/saspro/wavescale_hdr.py +4 -1
  119. setiastro/saspro/wavescalede.py +4 -1
  120. setiastro/saspro/whitebalance.py +84 -12
  121. setiastro/saspro/widgets/common_utilities.py +28 -21
  122. setiastro/saspro/widgets/resource_monitor.py +209 -111
  123. setiastro/saspro/widgets/spinboxes.py +10 -13
  124. setiastro/saspro/wimi.py +27 -656
  125. setiastro/saspro/wims.py +13 -3
  126. setiastro/saspro/xisf.py +101 -11
  127. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.7.1.post2.dist-info}/METADATA +4 -2
  128. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.7.1.post2.dist-info}/RECORD +132 -87
  129. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.7.1.post2.dist-info}/WHEEL +0 -0
  130. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.7.1.post2.dist-info}/entry_points.txt +0 -0
  131. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.7.1.post2.dist-info}/licenses/LICENSE +0 -0
  132. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.7.1.post2.dist-info}/licenses/license.txt +0 -0
setiastro/saspro/ser_stacker.py
@@ -0,0 +1,2321 @@
+# src/setiastro/saspro/ser_stacker.py
+from __future__ import annotations
+import os
+import threading
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+from dataclasses import dataclass
+from typing import Optional, Tuple, List, Dict, Any
+
+import numpy as np
+
+import cv2
+cv2.setNumThreads(1)
+
+from setiastro.saspro.imageops.serloader import SERReader
+from setiastro.saspro.ser_stack_config import SERStackConfig
+from setiastro.saspro.ser_tracking import PlanetaryTracker, SurfaceTracker, _to_mono01
+from setiastro.saspro.imageops.serloader import open_planetary_source, PlanetaryFrameSource
+
+_BAYER_TO_CV2 = {
+    "RGGB": cv2.COLOR_BayerRG2RGB,
+    "BGGR": cv2.COLOR_BayerBG2RGB,
+    "GRBG": cv2.COLOR_BayerGR2RGB,
+    "GBRG": cv2.COLOR_BayerGB2RGB,
+}
+
+def _cfg_bayer_pattern(cfg) -> str | None:
+    # cfg.bayer_pattern might be missing in older saved projects; be defensive
+    return getattr(cfg, "bayer_pattern", None)
+
+
+def _get_frame(src, idx: int, *, roi, debayer: bool, to_float01: bool, force_rgb: bool, bayer_pattern: str | None):
+    """
+    Drop-in wrapper:
+      - passes cfg.bayer_pattern down to sources that support it
+      - stays compatible with sources whose get_frame() doesn't accept bayer_pattern yet
+    """
+    try:
+        return src.get_frame(
+            int(idx),
+            roi=roi,
+            debayer=debayer,
+            to_float01=to_float01,
+            force_rgb=force_rgb,
+            bayer_pattern=bayer_pattern,
+        )
+    except TypeError:
+        # Back-compat: older PlanetaryFrameSource implementations
+        return src.get_frame(
+            int(idx),
+            roi=roi,
+            debayer=debayer,
+            to_float01=to_float01,
+            force_rgb=force_rgb,
+        )
+
+
+@dataclass
+class AnalyzeResult:
+    frames_total: int
+    roi_used: Optional[Tuple[int, int, int, int]]
+    track_mode: str
+    quality: np.ndarray  # (N,) float32 higher=better
+    dx: np.ndarray  # (N,) float32
+    dy: np.ndarray  # (N,) float32
+    conf: np.ndarray  # (N,) float32 0..1 (final conf used by stacking)
+    order: np.ndarray  # (N,) int indices sorted by quality desc
+    ref_mode: str  # "best_frame" | "best_stack"
+    ref_count: int
+    ref_image: np.ndarray  # float32 [0..1], ROI-sized
+    ap_centers: Optional[np.ndarray] = None  # (M,2) int32 in ROI coords
+    ap_size: int = 64
+    ap_multiscale: bool = False
+
+    # ✅ NEW: surface anchor confidence (coarse tracker)
+    coarse_conf: Optional[np.ndarray] = None  # (N,) float32 0..1
+
+
+@dataclass
+class FrameEval:
+    idx: int
+    score: float
+    dx: float
+    dy: float
+    conf: float
+
+def _print_surface_debug(
+    *,
+    dx: np.ndarray,
+    dy: np.ndarray,
+    conf: np.ndarray,
+    coarse_conf: np.ndarray | None,
+    floor: float = 0.05,
+    prefix: str = "[SER][Surface]"
+) -> None:
+    try:
+        dx = np.asarray(dx, dtype=np.float32)
+        dy = np.asarray(dy, dtype=np.float32)
+        conf = np.asarray(conf, dtype=np.float32)
+
+        dx_min = float(np.min(dx)) if dx.size else 0.0
+        dx_max = float(np.max(dx)) if dx.size else 0.0
+        dy_min = float(np.min(dy)) if dy.size else 0.0
+        dy_max = float(np.max(dy)) if dy.size else 0.0
+
+        conf_mean = float(np.mean(conf)) if conf.size else 0.0
+        conf_min = float(np.min(conf)) if conf.size else 0.0
+
+        msg = (
+            f"{prefix} dx[min,max]=({dx_min:.2f},{dx_max:.2f}) "
+            f"dy[min,max]=({dy_min:.2f},{dy_max:.2f}) "
+            f"conf[mean,min]=({conf_mean:.3f},{conf_min:.3f})"
+        )
+
+        if coarse_conf is not None:
+            cc = np.asarray(coarse_conf, dtype=np.float32)
+            cc_mean = float(np.mean(cc)) if cc.size else 0.0
+            cc_min = float(np.min(cc)) if cc.size else 0.0
+            cc_bad = float(np.mean(cc < 0.2)) if cc.size else 0.0
+            msg += f" coarse_conf[mean,min]=({cc_mean:.3f},{cc_min:.3f}) frac<0.2={cc_bad:.2%}"
+
+        if conf_mean <= floor + 1e-6:
+            msg += f" ⚠ conf.mean near floor ({floor}); alignment likely failing"
+        print(msg)
+    except Exception as e:
+        print(f"{prefix} debug print failed: {e}")
+
+
+def _clamp_roi_in_bounds(roi: Tuple[int, int, int, int], w: int, h: int) -> Tuple[int, int, int, int]:
+    x, y, rw, rh = [int(v) for v in roi]
+    x = max(0, min(w - 1, x))
+    y = max(0, min(h - 1, y))
+    rw = max(1, min(w - x, rw))
+    rh = max(1, min(h - y, rh))
+    return x, y, rw, rh
+
+def _grad_img(m: np.ndarray) -> np.ndarray:
+    """Simple, robust edge image for SSD refine."""
+    m = m.astype(np.float32, copy=False)
+    if cv2 is None:
+        # fallback: finite differences
+        gx = np.zeros_like(m); gx[:, 1:] = m[:, 1:] - m[:, :-1]
+        gy = np.zeros_like(m); gy[1:, :] = m[1:, :] - m[:-1, :]
+        g = np.abs(gx) + np.abs(gy)
+        g -= float(g.mean())
+        s = float(g.std()) + 1e-6
+        return g / s
+
+    gx = cv2.Sobel(m, cv2.CV_32F, 1, 0, ksize=3)
+    gy = cv2.Sobel(m, cv2.CV_32F, 0, 1, ksize=3)
+    g = cv2.magnitude(gx, gy)
+    g -= float(g.mean())
+    s = float(g.std()) + 1e-6
+    return (g / s).astype(np.float32, copy=False)
+
+def _ssd_prepare_ref(ref_m: np.ndarray, crop: float = 0.80):
+    """
+    Precompute reference gradient + crop window once.
+
+    Returns:
+        rg  : full reference gradient image (float32)
+        rgc : cropped view of rg
+        sl  : (y0,y1,x0,x1) crop slices
+    """
+    ref_m = ref_m.astype(np.float32, copy=False)
+    rg = _grad_img(ref_m)  # compute ONCE
+
+    H, W = rg.shape[:2]
+    cfx = max(8, int(W * (1.0 - float(crop)) * 0.5))
+    cfy = max(8, int(H * (1.0 - float(crop)) * 0.5))
+    x0, x1 = cfx, W - cfx
+    y0, y1 = cfy, H - cfy
+
+    rgc = rg[y0:y1, x0:x1]  # view
+    return rg, rgc, (y0, y1, x0, x1)
+
+def _subpixel_quadratic_1d(vm: float, v0: float, vp: float) -> float:
+    """
+    Given SSD at (-1,0,+1): (vm, v0, vp), return vertex offset in [-0.5,0.5]-ish.
+    Works for minimizing SSD.
+    """
+    denom = (vm - 2.0 * v0 + vp)
+    if abs(denom) < 1e-12:
+        return 0.0
+    # vertex of parabola fit through -1,0,+1
+    t = 0.5 * (vm - vp) / denom
+    return float(np.clip(t, -0.75, 0.75))
+
+
+def _ssd_confidence_prepared(
+    rgc: np.ndarray,
+    cgc0: np.ndarray,
+    dx_i: int,
+    dy_i: int,
+) -> float:
+    """
+    Compute SSD between rgc and cgc0 shifted by (dx_i,dy_i) using slicing overlap.
+    Returns SSD (lower is better).
+
+    NOTE: This is integer-only and extremely fast (no warps).
+    """
+    H, W = rgc.shape[:2]
+
+    # Overlap slices for rgc and shifted cgc0
+    x0r = max(0, dx_i)
+    x1r = min(W, W + dx_i)
+    y0r = max(0, dy_i)
+    y1r = min(H, H + dy_i)
+
+    x0c = max(0, -dx_i)
+    x1c = min(W, W - dx_i)
+    y0c = max(0, -dy_i)
+    y1c = min(H, H - dy_i)
+
+    rr = rgc[y0r:y1r, x0r:x1r]
+    cc = cgc0[y0c:y1c, x0c:x1c]
+
+    d = rr - cc
+    return float(np.mean(d * d))
+
+
+def _ssd_confidence(
+    ref_m: np.ndarray,
+    cur_m: np.ndarray,
+    dx: float,
+    dy: float,
+    *,
+    crop: float = 0.80,
+) -> float:
+    """
+    Original API: confidence from gradient SSD, higher=better (0..1).
+
+    Optimized:
+      - computes ref grad once per call (still OK if used standalone)
+      - uses one warp for (dx,dy)
+      - no extra work beyond necessary
+
+    For iterative search, use _refine_shift_ssd() which avoids redoing work.
+    """
+    ref_m = ref_m.astype(np.float32, copy=False)
+    cur_m = cur_m.astype(np.float32, copy=False)
+
+    # shift current by the proposed shift
+    cur_s = _shift_image(cur_m, float(dx), float(dy))
+
+    rg, rgc, sl = _ssd_prepare_ref(ref_m, crop=crop)
+    y0, y1, x0, x1 = sl
+
+    cg = _grad_img(cur_s)
+    cgc = cg[y0:y1, x0:x1]
+
+    d = rgc - cgc
+    ssd = float(np.mean(d * d))
+
+    scale = 0.002
+    conf = float(np.exp(-ssd / max(1e-12, scale)))
+    return float(np.clip(conf, 0.0, 1.0))
+
+
+def _refine_shift_ssd(
+    ref_m: np.ndarray,
+    cur_m: np.ndarray,
+    dx0: float,
+    dy0: float,
+    *,
+    radius: int = 10,
+    crop: float = 0.80,
+    bruteforce: bool = False,
+    max_steps: int | None = None,
+) -> tuple[float, float, float]:
+    """
+    Returns (dx_refine, dy_refine, conf) where you ADD refine to (dx0,dy0).
+
+    CPU-optimized:
+      - precompute ref gradient crop once
+      - apply (dx0,dy0) shift ONCE
+      - compute gradient ONCE for shifted cur
+      - evaluate integer candidates via slicing overlap SSD (no warps)
+
+    If bruteforce=True, does full window scan in [-r,r]^2 (fast).
+    Otherwise does 8-neighbor hill-climb over integer offsets (very fast).
+
+    Optional subpixel polish:
+      - after choosing best integer (best_dx,best_dy), do a tiny separable quadratic
+        fit along x and y using SSD at +/-1 around the best integer.
+      - does NOT require any new gradients/warps (just 4 extra SSD evals).
+    """
+    r = int(max(0, radius))
+    if r == 0:
+        # nothing to do; just compute confidence at dx0/dy0
+        c = _ssd_confidence(ref_m, cur_m, dx0, dy0, crop=crop)
+        return 0.0, 0.0, float(c)
+
+    # Prepare ref grad crop ONCE
+    _, rgc, sl = _ssd_prepare_ref(ref_m, crop=crop)
+    y0, y1, x0, x1 = sl
+
+    # Shift cur by the current estimate ONCE, then gradient ONCE
+    cur_m = cur_m.astype(np.float32, copy=False)
+    cur0 = _shift_image(cur_m, float(dx0), float(dy0))
+    cg0 = _grad_img(cur0)
+    cgc0 = cg0[y0:y1, x0:x1]
+
+    # Helper: parabola vertex for minimizing SSD, using (-1,0,+1) samples
+    def _quad_min_offset(vm: float, v0: float, vp: float) -> float:
+        denom = (vm - 2.0 * v0 + vp)
+        if abs(denom) < 1e-12:
+            return 0.0
+        t = 0.5 * (vm - vp) / denom
+        return float(np.clip(t, -0.75, 0.75))
+
+    if bruteforce:
+        # NOTE: your bruteforce path currently includes a subpixel step already.
+        # If you want to keep using that exact implementation, just call it:
+        dxr, dyr, conf = _refine_shift_ssd_bruteforce(ref_m, cur_m, dx0, dy0, radius=r, crop=crop)
+        return float(dxr), float(dyr), float(conf)
+
+    # Hill-climb in integer space minimizing SSD
+    if max_steps is None:
+        max_steps = max(1, min(r, 6))  # small cap helps speed; tune if you want
+
+    best_dx = 0
+    best_dy = 0
+    best_ssd = _ssd_confidence_prepared(rgc, cgc0, 0, 0)
+
+    neigh = ((-1,0),(1,0),(0,-1),(0,1),(-1,-1),(-1,1),(1,-1),(1,1))
+
+    for _ in range(int(max_steps)):
+        improved = False
+        for sx, sy in neigh:
+            cand_dx = best_dx + sx
+            cand_dy = best_dy + sy
+            if abs(cand_dx) > r or abs(cand_dy) > r:
+                continue
+
+            ssd = _ssd_confidence_prepared(rgc, cgc0, cand_dx, cand_dy)
+            if ssd < best_ssd:
+                best_ssd = ssd
+                best_dx = cand_dx
+                best_dy = cand_dy
+                improved = True
+
+        if not improved:
+            break
+
+    # ---- subpixel quadratic polish around best integer (cheap) ----
+    # Uses SSD at +/-1 around best integer in X and Y (separable).
+    dx_sub = 0.0
+    dy_sub = 0.0
+    if r >= 1:
+        # X samples at (best_dx-1, best_dy), (best_dx, best_dy), (best_dx+1, best_dy)
+        if abs(best_dx - 1) <= r:
+            s_xm = _ssd_confidence_prepared(rgc, cgc0, best_dx - 1, best_dy)
+        else:
+            s_xm = best_ssd
+        s_x0 = best_ssd
+        if abs(best_dx + 1) <= r:
+            s_xp = _ssd_confidence_prepared(rgc, cgc0, best_dx + 1, best_dy)
+        else:
+            s_xp = best_ssd
+        dx_sub = _quad_min_offset(s_xm, s_x0, s_xp)
+
+        # Y samples at (best_dx, best_dy-1), (best_dx, best_dy), (best_dx, best_dy+1)
+        if abs(best_dy - 1) <= r:
+            s_ym = _ssd_confidence_prepared(rgc, cgc0, best_dx, best_dy - 1)
+        else:
+            s_ym = best_ssd
+        s_y0 = best_ssd
+        if abs(best_dy + 1) <= r:
+            s_yp = _ssd_confidence_prepared(rgc, cgc0, best_dx, best_dy + 1)
+        else:
+            s_yp = best_ssd
+        dy_sub = _quad_min_offset(s_ym, s_y0, s_yp)
+
+    best_dx_f = float(best_dx) + float(dx_sub)
+    best_dy_f = float(best_dy) + float(dy_sub)
+
+    # Confidence: keep based on best *integer* SSD (no subpixel warp needed)
+    scale = 0.002
+    conf = float(np.exp(-best_ssd / max(1e-12, scale)))
+    conf = float(np.clip(conf, 0.0, 1.0))
+
+    return float(best_dx_f), float(best_dy_f), float(conf)
+
+
+
+def _refine_shift_ssd_bruteforce(
+    ref_m: np.ndarray,
+    cur_m: np.ndarray,
+    dx0: float,
+    dy0: float,
+    *,
+    radius: int = 2,
+    crop: float = 0.80,
+) -> tuple[float, float, float]:
+    """
+    Full brute-force scan in [-radius,+radius]^2, but optimized:
+      - shift by (dx0,dy0) ONCE
+      - compute gradients ONCE
+      - evaluate candidates via slicing overlap SSD (no warps)
+      - keep your separable quadratic subpixel fit
+    """
+    ref_m = ref_m.astype(np.float32, copy=False)
+    cur_m = cur_m.astype(np.float32, copy=False)
+
+    r = int(max(0, radius))
+    if r == 0:
+        c = _ssd_confidence(ref_m, cur_m, dx0, dy0, crop=crop)
+        return 0.0, 0.0, float(c)
+
+    # Apply current estimate once
+    cur0 = _shift_image(cur_m, float(dx0), float(dy0))
+
+    # Gradients once
+    rg = _grad_img(ref_m)
+    cg0 = _grad_img(cur0)
+
+    H, W = rg.shape[:2]
+    cfx = max(8, int(W * (1.0 - float(crop)) * 0.5))
+    cfy = max(8, int(H * (1.0 - float(crop)) * 0.5))
+    x0, x1 = cfx, W - cfx
+    y0, y1 = cfy, H - cfy
+
+    rgc = rg[y0:y1, x0:x1]
+    cgc0 = cg0[y0:y1, x0:x1]
+
+    # brute-force integer search
+    best = (0, 0)
+    best_ssd = float("inf")
+    ssds: dict[tuple[int, int], float] = {}
+
+    for j in range(-r, r + 1):
+        for i in range(-r, r + 1):
+            ssd = _ssd_confidence_prepared(rgc, cgc0, i, j)
+            ssds[(i, j)] = ssd
+            if ssd < best_ssd:
+                best_ssd = ssd
+                best = (i, j)
+
+    bx, by = best
+
+    # Subpixel quadratic fit (separable) if neighbors exist
+    def _quad_peak(vm, v0, vp):
+        denom = (vm - 2.0 * v0 + vp)
+        if abs(denom) < 1e-12:
+            return 0.0
+        return 0.5 * (vm - vp) / denom
+
+    dx_sub = 0.0
+    dy_sub = 0.0
+    if (bx - 1, by) in ssds and (bx + 1, by) in ssds:
+        dx_sub = _quad_peak(ssds[(bx - 1, by)], ssds[(bx, by)], ssds[(bx + 1, by)])
+    if (bx, by - 1) in ssds and (bx, by + 1) in ssds:
+        dy_sub = _quad_peak(ssds[(bx, by - 1)], ssds[(bx, by)], ssds[(bx, by + 1)])
+
+    dxr = float(bx + np.clip(dx_sub, -0.75, 0.75))
+    dyr = float(by + np.clip(dy_sub, -0.75, 0.75))
+
+    # Confidence: use your “sharpness” idea (median neighbor vs best)
+    neigh = [v for (k, v) in ssds.items() if k != (bx, by)]
+    neigh_med = float(np.median(np.asarray(neigh, np.float32))) if neigh else best_ssd
+    sharp = max(0.0, neigh_med - best_ssd)
+    conf = float(np.clip(sharp / max(1e-6, neigh_med), 0.0, 1.0))
+
+    return dxr, dyr, conf
+
+def _bandpass(m: np.ndarray) -> np.ndarray:
+    """Illumination-robust image for tracking (float32)."""
+    m = m.astype(np.float32, copy=False)
+
+    # remove large-scale illumination (terminator gradient)
+    lo = cv2.GaussianBlur(m, (0, 0), 6.0)
+    hi = cv2.GaussianBlur(m, (0, 0), 1.2)
+    bp = hi - lo
+
+    # normalize
+    bp -= float(bp.mean())
+    s = float(bp.std()) + 1e-6
+    bp = bp / s
+
+    # window to reduce FFT edge artifacts
+    hann_y = np.hanning(bp.shape[0]).astype(np.float32)
+    hann_x = np.hanning(bp.shape[1]).astype(np.float32)
+    bp *= (hann_y[:, None] * hann_x[None, :])
+
+    return bp
+
+def _reject_ap_outliers(ap_dx: np.ndarray, ap_dy: np.ndarray, ap_cf: np.ndarray, *, z: float = 3.5) -> np.ndarray:
+    """
+    Return a boolean mask of APs to keep based on MAD distance from median.
+    """
+    dx = np.asarray(ap_dx, np.float32)
+    dy = np.asarray(ap_dy, np.float32)
+    cf = np.asarray(ap_cf, np.float32)
+
+    good = cf > 0.15
+    if not np.any(good):
+        return good
+
+    dxg = dx[good]
+    dyg = dy[good]
+
+    mx = float(np.median(dxg))
+    my = float(np.median(dyg))
+
+    rx = np.abs(dxg - mx)
+    ry = np.abs(dyg - my)
+
+    madx = float(np.median(rx)) + 1e-6
+    mady = float(np.median(ry)) + 1e-6
+
+    zx = rx / madx
+    zy = ry / mady
+
+    keep_g = (zx < z) & (zy < z)
+    keep = np.zeros_like(good)
+    keep_idx = np.where(good)[0]
+    keep[keep_idx] = keep_g
+    return keep
+
+
523
+ def _coarse_surface_ref_locked(
524
+ source_obj,
525
+ *,
526
+ n: int,
527
+ roi,
528
+ roi_used=None,
529
+ debayer: bool,
530
+ to_rgb: bool,
531
+ bayer_pattern: Optional[str] = None,
532
+ progress_cb=None,
533
+ progress_every: int = 25,
534
+ # tuning:
535
+ down: int = 2,
536
+ template_size: int = 256,
537
+ search_radius: int = 96,
538
+ bandpass: bool = True,
539
+ # ✅ NEW: parallel coarse
540
+ workers: int | None = None,
541
+ stride: int = 16,
542
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
543
+ """
544
+ Surface coarse tracking that DOES NOT DRIFT:
545
+ - Locks to frame0 reference (in roi=roi_track coords).
546
+ - Uses NCC + subpixel phaseCorr.
547
+ - Optional parallelization by chunking time into segments of length=stride.
548
+ Each segment runs sequentially (keeps pred window), segments run in parallel.
549
+ """
550
+ if cv2 is None:
551
+ dx = np.zeros((n,), np.float32)
552
+ dy = np.zeros((n,), np.float32)
553
+ cc = np.ones((n,), np.float32)
554
+ return dx, dy, cc
555
+
556
+ dx = np.zeros((n,), dtype=np.float32)
557
+ dy = np.zeros((n,), dtype=np.float32)
558
+ cc = np.zeros((n,), dtype=np.float32)
559
+
560
+ def _downN(m: np.ndarray) -> np.ndarray:
561
+ if down <= 1:
562
+ return m.astype(np.float32, copy=False)
563
+ H, W = m.shape[:2]
564
+ return cv2.resize(
565
+ m,
566
+ (max(2, W // down), max(2, H // down)),
567
+ interpolation=cv2.INTER_AREA,
568
+ ).astype(np.float32, copy=False)
569
+
570
+ def _pick_anchor_center_ds(W: int, H: int) -> tuple[int, int]:
571
+ cx = W // 2
572
+ cy = H // 2
573
+ if roi_used is None or roi is None:
574
+ return int(cx), int(cy)
575
+ try:
576
+ xt, yt, wt, ht = [int(v) for v in roi]
577
+ xu, yu, wu, hu = [int(v) for v in roi_used]
578
+ cux = xu + (wu * 0.5)
579
+ cuy = yu + (hu * 0.5)
580
+ cx_full = cux - xt
581
+ cy_full = cuy - yt
582
+ cx = int(round(cx_full / max(1, int(down))))
583
+ cy = int(round(cy_full / max(1, int(down))))
584
+ cx = max(0, min(W - 1, cx))
585
+ cy = max(0, min(H - 1, cy))
586
+ except Exception:
587
+ pass
588
+ return int(cx), int(cy)
589
+
590
+ # ---------------------------
591
+ # Prep ref/template once
592
+ # ---------------------------
593
+ src0, owns0 = _ensure_source(source_obj, cache_items=2)
594
+ try:
595
+ img0 = _get_frame(src0, 0, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
596
+
597
+ ref0 = _to_mono01(img0).astype(np.float32, copy=False)
598
+ ref0 = _downN(ref0)
599
+ ref0p = _bandpass(ref0) if bandpass else (ref0 - float(ref0.mean()))
600
+
601
+ H, W = ref0p.shape[:2]
602
+ ts = int(max(64, min(template_size, min(H, W) - 4)))
603
+ half = ts // 2
604
+
605
+ cx0, cy0 = _pick_anchor_center_ds(W, H)
606
+ rx0 = max(0, min(W - ts, cx0 - half))
607
+ ry0 = max(0, min(H - ts, cy0 - half))
608
+ ref_t = ref0p[ry0:ry0 + ts, rx0:rx0 + ts].copy()
609
+ finally:
610
+ if owns0:
611
+ try:
612
+ src0.close()
613
+ except Exception:
614
+ pass
615
+
616
+ dx[0] = 0.0
617
+ dy[0] = 0.0
618
+ cc[0] = 1.0
619
+
620
+ if progress_cb:
621
+ progress_cb(0, n, "Surface: coarse (ref-locked NCC+subpix)…")
622
+
623
+ # If no workers requested (or too small), fall back to sequential
624
+ if workers is None:
625
+ cpu = os.cpu_count() or 4
626
+ workers = max(1, min(cpu, 48))
627
+ workers = int(max(1, workers))
628
+ stride = int(max(4, stride))
629
+
630
+ # ---------------------------
631
+ # Core "one frame" matcher
632
+ # ---------------------------
633
+ def _match_one(curp: np.ndarray, pred_x: float, pred_y: float, r: int) -> tuple[float, float, float, float, float]:
634
+ # returns (mx_ds, my_ds, dx_full, dy_full, conf)
635
+ x0 = int(max(0, min(W - 1, pred_x - r)))
636
+ y0 = int(max(0, min(H - 1, pred_y - r)))
637
+ x1 = int(min(W, pred_x + r + ts))
638
+ y1 = int(min(H, pred_y + r + ts))
639
+
640
+ win = curp[y0:y1, x0:x1]
641
+ if win.shape[0] < ts or win.shape[1] < ts:
642
+ return float(pred_x), float(pred_y), 0.0, 0.0, 0.0
643
+
644
+ res = cv2.matchTemplate(win, ref_t, cv2.TM_CCOEFF_NORMED)
645
+ _, max_val, _, max_loc = cv2.minMaxLoc(res)
646
+ conf_ncc = float(np.clip(max_val, 0.0, 1.0))
647
+
648
+ mx_ds = float(x0 + max_loc[0])
649
+ my_ds = float(y0 + max_loc[1])
650
+
651
+ # subpix refine on the matched patch
652
+ mx_i = int(round(mx_ds))
653
+ my_i = int(round(my_ds))
654
+ cur_t = curp[my_i:my_i + ts, mx_i:mx_i + ts]
655
+ if cur_t.shape == ref_t.shape:
656
+ (sdx, sdy), resp = cv2.phaseCorrelate(ref_t.astype(np.float32), cur_t.astype(np.float32))
657
+ sub_dx = float(sdx)
658
+ sub_dy = float(sdy)
659
+ conf_pc = float(np.clip(resp, 0.0, 1.0))
660
+ else:
661
+ sub_dx = 0.0
662
+ sub_dy = 0.0
663
+ conf_pc = 0.0
664
+
665
+ dx_ds = float(rx0 - mx_ds) + sub_dx
666
+ dy_ds = float(ry0 - my_ds) + sub_dy
667
+ dx_full = float(dx_ds * down)
668
+ dy_full = float(dy_ds * down)
669
+
670
+ conf = float(np.clip(0.65 * conf_ncc + 0.35 * conf_pc, 0.0, 1.0))
671
+ return float(mx_ds), float(my_ds), dx_full, dy_full, conf
672
+
673
+ # ---------------------------
674
+ # Keyframe boundary pass (sequential)
675
+ # ---------------------------
676
+ boundaries = list(range(0, n, stride))
677
+ start_pred = {} # b -> (pred_x, pred_y)
678
+ start_pred[0] = (float(rx0), float(ry0))
679
+
680
+ # We use a slightly larger radius for boundary frames to be extra safe
681
+ r_key = int(max(16, int(search_radius) * 2))
682
+
683
+ srck, ownsk = _ensure_source(source_obj, cache_items=2)
684
+ try:
685
+ pred_x, pred_y = float(rx0), float(ry0)
686
+ for b in boundaries[1:]:
687
+ img = _get_frame(srck, b, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
688
+
689
+ cur = _to_mono01(img).astype(np.float32, copy=False)
690
+ cur = _downN(cur)
691
+ curp = _bandpass(cur) if bandpass else (cur - float(cur.mean()))
692
+
693
+ mx_ds, my_ds, dx_b, dy_b, conf_b = _match_one(curp, pred_x, pred_y, r_key)
694
+
695
+ # store boundary predictor (template top-left in this frame)
696
+ start_pred[b] = (mx_ds, my_ds)
697
+
698
+ # update for next boundary
699
+ pred_x, pred_y = mx_ds, my_ds
700
+
701
+ # also fill boundary output immediately (optional but nice)
702
+ dx[b] = dx_b
703
+ dy[b] = dy_b
704
+ cc[b] = conf_b
705
+ if conf_b < 0.15 and b > 0:
706
+ dx[b] = dx[b - 1]
707
+ dy[b] = dy[b - 1]
708
+ finally:
709
+ if ownsk:
710
+ try:
711
+ srck.close()
712
+ except Exception:
713
+ pass
714
+
715
+ # ---------------------------
716
+ # Parallel per-chunk scan (each chunk sequential)
717
+ # ---------------------------
718
+ r = int(max(16, search_radius))
719
+
720
+ def _run_chunk(b: int, e: int) -> int:
721
+ src, owns = _ensure_source(source_obj, cache_items=0)
722
+ try:
723
+ pred_x, pred_y = start_pred.get(b, (float(rx0), float(ry0)))
724
+ # if boundary already computed above, keep it; start after b
725
+ i0 = b
726
+ if b in start_pred and b != 0:
727
+ i0 = b + 1 # boundary already solved with r_key
728
+
729
+ if i0 == 0:
730
+ i0 = 1
731
+ for i in range(i0, e):
732
+ if i in start_pred:
733
+ pred_x, pred_y = start_pred[i]
734
+ continue
735
+
736
+ img = _get_frame(src, i, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
737
+ cur = _to_mono01(img).astype(np.float32, copy=False)
738
+ cur = _downN(cur)
739
+ curp = _bandpass(cur) if bandpass else (cur - float(cur.mean()))
740
+
741
+ mx_ds, my_ds, dx_i, dy_i, conf_i = _match_one(curp, pred_x, pred_y, r)
742
+
743
+ dx[i] = dx_i
744
+ dy[i] = dy_i
745
+ cc[i] = conf_i
746
+
747
+ pred_x, pred_y = mx_ds, my_ds
748
+
749
+ if conf_i < 0.15 and i > 0:
750
+ dx[i] = dx[i - 1]
751
+ dy[i] = dy[i - 1]
752
+ return (e - b)
753
+ finally:
754
+ if owns:
755
+ try:
756
+ src.close()
757
+ except Exception:
758
+ pass
759
+
760
+ if workers <= 1 or n <= stride * 2:
761
+ # small job: just do sequential scan exactly like before
762
+ src, owns = _ensure_source(source_obj, cache_items=2)
763
+ try:
764
+ pred_x, pred_y = float(rx0), float(ry0)
765
+ for i in range(1, n):
766
+ img = _get_frame(src, i, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
767
+ cur = _to_mono01(img).astype(np.float32, copy=False)
768
+ cur = _downN(cur)
769
+ curp = _bandpass(cur) if bandpass else (cur - float(cur.mean()))
770
+
771
+ mx_ds, my_ds, dx_i, dy_i, conf_i = _match_one(curp, pred_x, pred_y, r)
772
+ dx[i] = dx_i
773
+ dy[i] = dy_i
774
+ cc[i] = conf_i
775
+ pred_x, pred_y = mx_ds, my_ds
776
+
777
+ if conf_i < 0.15:
778
+ dx[i] = dx[i - 1]
779
+ dy[i] = dy[i - 1]
780
+
781
+ if progress_cb and (i % int(max(1, progress_every)) == 0 or i == n - 1):
782
+ progress_cb(i, n, "Surface: coarse (ref-locked NCC+subpix)…")
783
+ finally:
784
+ if owns:
785
+ try:
786
+ src.close()
787
+ except Exception:
788
+ pass
789
+ return dx, dy, cc
790
+
791
+ # Parallel chunks
792
+ done = 0
793
+ with ThreadPoolExecutor(max_workers=workers) as ex:
794
+ futs = []
795
+ for b in boundaries:
796
+ e = min(n, b + stride)
797
+ futs.append(ex.submit(_run_chunk, b, e))
798
+
799
+ for fut in as_completed(futs):
800
+ done += int(fut.result())
801
+ if progress_cb:
802
+ # best-effort: done is "frames processed" not exact index
803
+ progress_cb(min(done, n - 1), n, "Surface: coarse (ref-locked NCC+subpix)…")
804
+
805
+ return dx, dy, cc
806
+
807
+
808
+ def _shift_image(img01: np.ndarray, dx: float, dy: float) -> np.ndarray:
809
+ """
810
+ Shift image by (dx,dy) in pixel units. Positive dx shifts right, positive dy shifts down.
811
+ Uses cv2.warpAffine if available; else nearest-ish roll (wrap) fallback.
812
+ """
813
+ if abs(dx) < 1e-6 and abs(dy) < 1e-6:
814
+ return img01
815
+
816
+ if cv2 is not None:
817
+ # border replicate is usually better than constant black for planetary
818
+ h, w = img01.shape[:2]
819
+ M = np.array([[1.0, 0.0, dx],
820
+ [0.0, 1.0, dy]], dtype=np.float32)
821
+ if img01.ndim == 2:
822
+ return cv2.warpAffine(img01, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
823
+ else:
824
+ return cv2.warpAffine(img01, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
825
+ # very rough fallback (wraps!)
826
+ rx = int(round(dx))
827
+ ry = int(round(dy))
828
+ out = np.roll(img01, shift=ry, axis=0)
829
+ out = np.roll(out, shift=rx, axis=1)
830
+ return out
831
+
832
+ def _downsample_mono01(img01: np.ndarray, max_dim: int = 512) -> np.ndarray:
833
+ """
834
+ Convert to mono and downsample for analysis/tracking. Returns float32 in [0,1].
835
+ """
836
+ m = _to_mono01(img01).astype(np.float32, copy=False)
837
+ H, W = m.shape[:2]
838
+ mx = int(max(1, max_dim))
839
+ if max(H, W) <= mx:
840
+ return m
841
+
842
+ if cv2 is None:
843
+ # crude fallback
844
+ scale = mx / float(max(H, W))
845
+ nh = max(2, int(round(H * scale)))
846
+ nw = max(2, int(round(W * scale)))
847
+ # nearest-ish
848
+ ys = (np.linspace(0, H - 1, nh)).astype(np.int32)
849
+ xs = (np.linspace(0, W - 1, nw)).astype(np.int32)
850
+ return m[ys[:, None], xs[None, :]].astype(np.float32)
851
+
852
+ scale = mx / float(max(H, W))
853
+ nh = max(2, int(round(H * scale)))
854
+ nw = max(2, int(round(W * scale)))
855
+ return cv2.resize(m, (nw, nh), interpolation=cv2.INTER_AREA).astype(np.float32, copy=False)
856
+
857
+
858
+ def _phase_corr_shift(ref_m: np.ndarray, cur_m: np.ndarray) -> tuple[float, float, float]:
859
+ """
860
+ Returns (dx, dy, response) such that shifting cur by (dx,dy) aligns to ref.
861
+ Uses cv2.phaseCorrelate if available.
862
+ """
863
+ if cv2 is None:
864
+ return 0.0, 0.0, 1.0
865
+
866
+ # phaseCorrelate expects float32/float64
867
+ ref = ref_m.astype(np.float32, copy=False)
868
+ cur = cur_m.astype(np.float32, copy=False)
869
+ (dx, dy), resp = cv2.phaseCorrelate(ref, cur) # shift cur -> ref
870
+ return float(dx), float(dy), float(resp)
871
+
872
+ def _ensure_source(source, cache_items: int = 10) -> tuple[PlanetaryFrameSource, bool]:
873
+ """
874
+ Returns (src, owns_src)
875
+
876
+ Accepts:
877
+ - PlanetaryFrameSource-like object (duck typed: get_frame/meta/close)
878
+ - path string
879
+ - list/tuple of paths
880
+ """
881
+ # Already an opened source-like object
882
+ if source is not None and hasattr(source, "get_frame") and hasattr(source, "meta") and hasattr(source, "close"):
883
+ return source, False
884
+
885
+ # allow tuple -> list
886
+ if isinstance(source, tuple):
887
+ source = list(source)
888
+
889
+ src = open_planetary_source(source, cache_items=cache_items)
890
+ return src, True
891
+
892
+ def stack_ser(
893
+ source: str | list[str] | PlanetaryFrameSource,
894
+ *,
895
+ roi=None,
896
+ debayer: bool = True,
897
+ keep_percent: float = 20.0,
898
+ track_mode: str = "planetary",
899
+ surface_anchor=None,
900
+ to_rgb: bool = False, # ✅ add this
901
+ bayer_pattern: Optional[str] = None, # ✅ strongly recommended since dialog passes it
902
+ analysis: AnalyzeResult | None = None,
903
+ local_warp: bool = True,
904
+ max_dim: int = 512,
905
+ progress_cb=None,
906
+ cache_items: int = 10,
907
+ workers: int | None = None,
908
+ chunk_size: int | None = None,
909
+ # ✅ NEW drizzle knobs
910
+ drizzle_scale: float = 1.0,
911
+ drizzle_pixfrac: float = 0.80,
912
+ drizzle_kernel: str = "gaussian",
913
+ drizzle_sigma: float = 0.0,
914
+ keep_mask=None,
915
+ ) -> tuple[np.ndarray, dict]:
916
+ source_obj = source
917
+
918
+ # ---- Worker count ----
919
+ if workers is None:
920
+ cpu = os.cpu_count() or 4
921
+ workers = max(1, min(cpu, 48))
922
+
923
+ if cv2 is not None:
924
+ try:
925
+ cv2.setNumThreads(1)
926
+ except Exception:
927
+ pass
928
+
929
+ drizzle_scale = float(drizzle_scale)
930
+ drizzle_on = drizzle_scale > 1.0001
931
+ drizzle_pixfrac = float(drizzle_pixfrac)
932
+ drizzle_kernel = str(drizzle_kernel).strip().lower()
933
+ if drizzle_kernel not in ("square", "circle", "gaussian"):
934
+ drizzle_kernel = "gaussian"
935
+ drizzle_sigma = float(drizzle_sigma)
936
+
937
+ # ---- Open once to get meta + first frame shape ----
938
+ src0, owns0 = _ensure_source(source_obj, cache_items=cache_items)
939
+ try:
940
+ n = int(src0.meta.frames)
941
+ keep_percent = max(0.1, min(100.0, float(keep_percent)))
942
+ k = max(1, int(round(n * (keep_percent / 100.0))))
943
+
944
+ if analysis is None or analysis.ref_image is None or analysis.ap_centers is None:
945
+ raise ValueError("stack_ser expects analysis with ref_image + ap_centers (run Analyze first).")
946
+
947
+ order = np.asarray(analysis.order, np.int32)
948
+ keep_idx = order[:k].astype(np.int32, copy=False)
949
+
950
+ # ✅ NEW: apply keep_mask (global mask in original frame index space)
951
+ if keep_mask is not None:
952
+ km = np.asarray(keep_mask, dtype=bool)
953
+ if km.ndim != 1 or km.shape[0] != n:
954
+ raise ValueError(f"keep_mask must be 1D bool of length {n}, got shape {km.shape}")
955
+ keep_idx = keep_idx[km[keep_idx]]
956
+
957
+ # Ensure at least one frame survives (or decide to error)
958
+ if keep_idx.size == 0:
959
+ raise ValueError("keep_mask rejected all frames in the Keep% set.")
960
+ # reference / APs
961
+ ref_img = analysis.ref_image.astype(np.float32, copy=False)
962
+ ref_m = _to_mono01(ref_img).astype(np.float32, copy=False)
963
+ ap_centers_all = np.asarray(analysis.ap_centers, np.int32)
964
+ ap_size = int(getattr(analysis, "ap_size", 64) or 64)
965
+
966
+ # frame shape for accumulator
967
+ first = _get_frame(src0, int(keep_idx[0]), roi=roi, debayer=debayer, to_float01=True, force_rgb=False, bayer_pattern=bayer_pattern)
968
+ acc_shape = first.shape # (H,W) or (H,W,3)
969
+ finally:
970
+ if owns0:
971
+ try:
972
+ src0.close()
973
+ except Exception:
974
+ pass
975
+
976
+ # ---- Progress aggregation (thread-safe) ----
977
+ done_lock = threading.Lock()
978
+ done_ct = 0
979
+ total_ct = int(len(keep_idx))
980
+
981
+ def _bump_progress(delta: int, phase: str = "Stack"):
982
+ nonlocal done_ct
983
+ if progress_cb is None:
984
+ return
985
+ with done_lock:
986
+ done_ct += int(delta)
987
+ d = done_ct
988
+ progress_cb(d, total_ct, phase)
989
+
990
+ # ---- Chunking ----
991
+ idx_list = keep_idx.tolist()
992
+ if chunk_size is None:
993
+ chunk_size = max(8, int(np.ceil(len(idx_list) / float(workers * 2))))
994
+ chunks: list[list[int]] = [idx_list[i:i + chunk_size] for i in range(0, len(idx_list), chunk_size)]
995
+
996
+ if progress_cb:
997
+ progress_cb(0, total_ct, "Stack")
998
+
999
+ # ---- drizzle helpers ----
1000
+ if drizzle_on:
1001
+ from setiastro.saspro.legacy.numba_utils import (
1002
+ drizzle_deposit_numba_kernel_mono,
1003
+ drizzle_deposit_color_kernel,
1004
+ finalize_drizzle_2d,
1005
+ finalize_drizzle_3d,
1006
+ )
1007
+
1008
+ # map kernel string -> code used by your numba
1009
+ kernel_code = {"square": 0, "circle": 1, "gaussian": 2}[drizzle_kernel]
1010
+
1011
+ # If gaussian sigma isn't provided, use something tied to pixfrac.
1012
+ # Your numba interprets gaussian sigma as "sigma_out", and also enforces >= drop_shrink*0.5.
1013
+ if drizzle_sigma <= 1e-9:
1014
+ # a good practical default: sigma ~ pixfrac*0.5
1015
+ drizzle_sigma_eff = max(1e-3, float(drizzle_pixfrac) * 0.5)
1016
+ else:
1017
+ drizzle_sigma_eff = drizzle_sigma
1018
+
1019
+ H, W = int(acc_shape[0]), int(acc_shape[1])
1020
+ outH = int(round(H * drizzle_scale))
1021
+ outW = int(round(W * drizzle_scale))
1022
+
1023
+ # Identity transform from input pixels -> aligned/reference pixel coords
1024
+ # drizzle_factor applies the scale.
1025
+ T = np.zeros((2, 3), dtype=np.float32)
1026
+ T[0, 0] = 1.0
1027
+ T[1, 1] = 1.0
1028
+
1029
+ # ---- Worker: accumulate its own sum OR its own drizzle buffers ----
1030
+ def _stack_chunk(chunk: list[int]):
1031
+ src, owns = _ensure_source(source_obj, cache_items=0)
1032
+ try:
1033
+ if drizzle_on:
1034
+ if len(acc_shape) == 2:
1035
+ dbuf = np.zeros((outH, outW), dtype=np.float32)
1036
+ cbuf = np.zeros((outH, outW), dtype=np.float32)
1037
+ else:
1038
+ dbuf = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
1039
+ cbuf = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
1040
+ else:
1041
+ acc = np.zeros(acc_shape, dtype=np.float32)
1042
+ wacc = 0.0
1043
+
1044
+ for i in chunk:
1045
+ img = _get_frame(src, int(i), roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern).astype(np.float32, copy=False)
1046
+
1047
+ # Global prior (from Analyze)
1048
+ gdx = float(analysis.dx[int(i)]) if (analysis.dx is not None) else 0.0
1049
+ gdy = float(analysis.dy[int(i)]) if (analysis.dy is not None) else 0.0
1050
+
1051
+ # Global prior always first
1052
+ warped_g = _shift_image(img, gdx, gdy)
1053
+
1054
+ if cv2 is None or (not local_warp):
1055
+ warped = warped_g
1056
+ else:
1057
+ cur_m_g = _to_mono01(warped_g).astype(np.float32, copy=False)
1058
+
1059
+ ap_rdx, ap_rdy, ap_resp = _ap_phase_shifts_per_ap(
1060
+ ref_m, cur_m_g,
1061
+ ap_centers=ap_centers_all,
1062
+ ap_size=ap_size,
1063
+ max_dim=max_dim,
1064
+ )
1065
+ ap_cf = np.clip(ap_resp.astype(np.float32, copy=False), 0.0, 1.0)
1066
+
1067
+ keep = _reject_ap_outliers(ap_rdx, ap_rdy, ap_cf, z=3.5)
1068
+ if np.any(keep):
1069
+ ap_centers = ap_centers_all[keep]
1070
+ ap_dx_k = ap_rdx[keep]
1071
+ ap_dy_k = ap_rdy[keep]
1072
+ ap_cf_k = ap_cf[keep]
1073
+
1074
+ dx_field, dy_field = _dense_field_from_ap_shifts(
1075
+ warped_g.shape[0], warped_g.shape[1],
1076
+ ap_centers, ap_dx_k, ap_dy_k, ap_cf_k,
1077
+ grid=32, power=2.0, conf_floor=0.15,
1078
+ radius=float(ap_size) * 3.0,
1079
+ )
1080
+ warped = _warp_by_dense_field(warped_g, dx_field, dy_field)
1081
+ else:
1082
+ warped = warped_g
1083
+
1084
+ if drizzle_on:
1085
+ # deposit aligned frame into drizzle buffers
1086
+ fw = 1.0 # frame_weight (could later use quality weights)
1087
+ if warped.ndim == 2:
1088
+ drizzle_deposit_numba_kernel_mono(
1089
+ warped, T, dbuf, cbuf,
1090
+ drizzle_factor=drizzle_scale,
1091
+ drop_shrink=drizzle_pixfrac,
1092
+ frame_weight=fw,
1093
+ kernel_code=kernel_code,
1094
+ gaussian_sigma_or_radius=drizzle_sigma_eff,
1095
+ )
1096
+ else:
1097
+ drizzle_deposit_color_kernel(
1098
+ warped, T, dbuf, cbuf,
1099
+ drizzle_factor=drizzle_scale,
1100
+ drop_shrink=drizzle_pixfrac,
1101
+ frame_weight=fw,
1102
+ kernel_code=kernel_code,
1103
+ gaussian_sigma_or_radius=drizzle_sigma_eff,
1104
+ )
1105
+ else:
1106
+ acc += warped
1107
+ wacc += 1.0
1108
+
1109
+ _bump_progress(len(chunk), "Stack")
1110
+
1111
+ if drizzle_on:
1112
+ return dbuf, cbuf
1113
+ return acc, wacc
1114
+
1115
+ finally:
1116
+ if owns:
1117
+ try:
1118
+ src.close()
1119
+ except Exception:
1120
+ pass
1121
+
1122
+ # ---- Parallel run + reduce ----
1123
+ if drizzle_on:
1124
+ # reduce drizzle buffers
1125
+ if len(acc_shape) == 2:
1126
+ dbuf_total = np.zeros((outH, outW), dtype=np.float32)
1127
+ cbuf_total = np.zeros((outH, outW), dtype=np.float32)
1128
+ else:
1129
+ dbuf_total = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
1130
+ cbuf_total = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
1131
+
1132
+ with ThreadPoolExecutor(max_workers=workers) as ex:
1133
+ futs = [ex.submit(_stack_chunk, c) for c in chunks if c]
1134
+ for fut in as_completed(futs):
1135
+ db, cb = fut.result()
1136
+ dbuf_total += db
1137
+ cbuf_total += cb
1138
+
1139
+ # finalize
1140
+ if len(acc_shape) == 2:
1141
+ out = np.zeros((outH, outW), dtype=np.float32)
1142
+ finalize_drizzle_2d(dbuf_total, cbuf_total, out)
1143
+ else:
1144
+ out = np.zeros((outH, outW, acc_shape[2]), dtype=np.float32)
1145
+ finalize_drizzle_3d(dbuf_total, cbuf_total, out)
1146
+
1147
+ out = np.clip(out, 0.0, 1.0).astype(np.float32, copy=False)
1148
+
1149
+ else:
1150
+ acc_total = np.zeros(acc_shape, dtype=np.float32)
1151
+ wacc_total = 0.0
1152
+
1153
+ with ThreadPoolExecutor(max_workers=workers) as ex:
1154
+ futs = [ex.submit(_stack_chunk, c) for c in chunks if c]
1155
+ for fut in as_completed(futs):
1156
+ acc_c, w_c = fut.result()
1157
+ acc_total += acc_c
1158
+ wacc_total += float(w_c)
1159
+
1160
+ out = np.clip(acc_total / max(1e-6, wacc_total), 0.0, 1.0).astype(np.float32, copy=False)
1161
+
1162
+ diag = {
1163
+ "frames_total": int(n),
1164
+ "frames_kept": int(len(keep_idx)),
1165
+ "roi_used": roi,
1166
+ "track_mode": track_mode,
1167
+ "local_warp": bool(local_warp),
1168
+ "workers": int(workers),
1169
+ "chunk_size": int(chunk_size),
1170
+ "drizzle_scale": float(drizzle_scale),
1171
+ "drizzle_pixfrac": float(drizzle_pixfrac),
1172
+ "drizzle_kernel": str(drizzle_kernel),
1173
+ "drizzle_sigma": float(drizzle_sigma),
1174
+ }
1175
+ return out, diag
1176
+
1177
+ def _build_reference(
1178
+ src: PlanetaryFrameSource,
1179
+ *,
1180
+ order: np.ndarray,
1181
+ roi,
1182
+ debayer: bool,
1183
+ to_rgb: bool,
1184
+ ref_mode: str,
1185
+ ref_count: int,
1186
+ bayer_pattern=None,
1187
+ ) -> np.ndarray:
1188
+ """
1189
+ ref_mode:
1190
+ - "best_frame": return best single frame
1191
+ - "best_stack": return mean of best ref_count frames
1192
+ """
1193
+ best_idx = int(order[0])
1194
+ f0 = _get_frame(src, best_idx, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
1195
+ if ref_mode != "best_stack" or ref_count <= 1:
1196
+ return f0.astype(np.float32, copy=False)
1197
+
1198
+ k = int(max(2, min(ref_count, len(order))))
1199
+ acc = np.zeros_like(f0, dtype=np.float32)
1200
+ for j in range(k):
1201
+ idx = int(order[j])
1202
+ fr = _get_frame(src, idx, roi=roi, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb), bayer_pattern=bayer_pattern)
1203
+ acc += fr.astype(np.float32, copy=False)
1204
+ ref = acc / float(k)
1205
+ return np.clip(ref, 0.0, 1.0).astype(np.float32, copy=False)
1206
+
1207
+ def _cfg_get_source(cfg) -> Any:
1208
+ """
1209
+ Back-compat: prefer cfg.source (new), else cfg.ser_path (old).
1210
+ cfg.source may be:
1211
+ - path string (ser/avi/mp4/etc)
1212
+ - list of image paths
1213
+ - PlanetaryFrameSource
1214
+ """
1215
+ src = getattr(cfg, "source", None)
1216
+ if src is not None and src != "":
1217
+ return src
1218
+ return getattr(cfg, "ser_path", None)
1219
+
1220
+ def analyze_ser(
1221
+ cfg: SERStackConfig,
1222
+ *,
1223
+ debayer: bool = True,
1224
+ to_rgb: bool = False,
1225
+ smooth_sigma: float = 1.5, # kept for API compat
1226
+ thresh_pct: float = 92.0, # kept for API compat
1227
+ ref_mode: str = "best_frame", # "best_frame" or "best_stack"
1228
+ bayer_pattern: Optional[str] = None,
1229
+ ref_count: int = 5,
1230
+ max_dim: int = 512,
1231
+ progress_cb=None,
1232
+ workers: Optional[int] = None,
1233
+ ) -> AnalyzeResult:
1234
+ """
1235
+ Parallel analyze for *any* PlanetaryFrameSource (SER/AVI/MP4/images/sequence).
1236
+ - Pass 1: quality for every frame
1237
+ - Build reference:
1238
+ - planetary: best frame or best-N stack
1239
+ - surface: frame 0 (chronological anchor)
1240
+ - Autoplace APs (always)
1241
+ - Pass 2:
1242
+ - planetary: AP-based shift directly
1243
+ - surface:
1244
+ (A) coarse drift stabilization via ref-locked NCC+subpix (on a larger tracking ROI),
1245
+ (B) AP search+refine that follows coarse, with outlier rejection,
1246
+ (C) robust median -> final dx/dy/conf
1247
+ """
1248
+
1249
+ source_obj = _cfg_get_source(cfg)
1250
+ bpat = bayer_pattern or _cfg_bayer_pattern(cfg)
1251
+
1252
+ if not source_obj:
1253
+ raise ValueError("SERStackConfig.source/ser_path is empty")
1254
+
1255
+ # ---- open source + meta (single open) ----
1256
+ src0, owns0 = _ensure_source(source_obj, cache_items=2)
1257
+ try:
1258
+ meta = src0.meta
1259
+ base_roi = cfg.roi
1260
+ if base_roi is not None:
1261
+ base_roi = _clamp_roi_in_bounds(base_roi, meta.width, meta.height)
1262
+ n = int(meta.frames)
1263
+ if n <= 0:
1264
+ raise ValueError("Source contains no frames")
1265
+ src_w = int(meta.width)
1266
+ src_h = int(meta.height)
1267
+ finally:
1268
+ if owns0:
1269
+ try:
1270
+ src0.close()
1271
+ except Exception:
1272
+ pass
1273
+
1274
+ # ---- Worker count ----
1275
+ if workers is None:
1276
+ cpu = os.cpu_count() or 4
1277
+ workers = max(1, min(cpu, 48))
1278
+
1279
+ if cv2 is not None:
1280
+ try:
1281
+ cv2.setNumThreads(1)
1282
+ except Exception:
1283
+ pass
1284
+
1285
+ # ---- Surface tracking ROI (IMPORTANT for big drift) ----
1286
+ def _surface_tracking_roi() -> Optional[Tuple[int, int, int, int]]:
1287
+ if base_roi is None:
1288
+ return None # full frame
1289
+ margin = int(getattr(cfg, "surface_track_margin", 256))
1290
+ x, y, w, h = [int(v) for v in base_roi]
1291
+ x0 = max(0, x - margin)
1292
+ y0 = max(0, y - margin)
1293
+ x1 = min(src_w, x + w + margin)
1294
+ y1 = min(src_h, y + h + margin)
1295
+ return _clamp_roi_in_bounds((x0, y0, x1 - x0, y1 - y0), src_w, src_h)
1296
+
1297
+ roi_track = _surface_tracking_roi() if cfg.track_mode == "surface" else base_roi
1298
+ roi_used = base_roi # APs and final ref are in this coordinate system
1299
+
1300
+ # -------------------------------------------------------------------------
1301
+ # Pass 1: quality (use roi_used)
1302
+ # -------------------------------------------------------------------------
1303
+ quality = np.zeros((n,), dtype=np.float32)
1304
+ idxs = np.arange(n, dtype=np.int32)
1305
+ n_chunks = max(5, int(workers) * int(getattr(cfg, "progress_chunk_factor", 5)))
1306
+ n_chunks = max(1, min(int(n), n_chunks))
1307
+ chunks = np.array_split(idxs, n_chunks)
1308
+
1309
+ if progress_cb:
1310
+ progress_cb(0, n, "Quality")
1311
+
1312
+ def _q_chunk(chunk: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
1313
+ out_i: list[int] = []
1314
+ out_q: list[float] = []
1315
+ src, owns = _ensure_source(source_obj, cache_items=0)
1316
+ try:
1317
+ for i in chunk.tolist():
1318
+ img = _get_frame(
1319
+ src, int(i),
1320
+ roi=roi_used,
1321
+ debayer=debayer,
1322
+ to_float01=True,
1323
+ force_rgb=bool(to_rgb),
1324
+ bayer_pattern=bpat,
1325
+ )
1326
+ m = _downsample_mono01(img, max_dim=max_dim)
1327
+
1328
+ if cv2 is not None:
1329
+ lap = cv2.Laplacian(m, cv2.CV_32F, ksize=3)
1330
+ q = float(np.mean(np.abs(lap)))
1331
+ else:
1332
+ q = float(
1333
+ np.abs(m[:, 1:] - m[:, :-1]).mean() +
1334
+ np.abs(m[1:, :] - m[:-1, :]).mean()
1335
+ )
1336
+ out_i.append(int(i))
1337
+ out_q.append(q)
1338
+ finally:
1339
+ if owns:
1340
+ try:
1341
+ src.close()
1342
+ except Exception:
1343
+ pass
1344
+ return np.asarray(out_i, np.int32), np.asarray(out_q, np.float32)
1345
+
1346
+ done_ct = 0
1347
+ with ThreadPoolExecutor(max_workers=workers) as ex:
1348
+ futs = [ex.submit(_q_chunk, c) for c in chunks if c.size > 0]
1349
+ for fut in as_completed(futs):
1350
+ ii, qq = fut.result()
1351
+ quality[ii] = qq
1352
+ done_ct += int(ii.size)
1353
+ if progress_cb:
1354
+ progress_cb(done_ct, n, "Quality")
1355
+
1356
+ order = np.argsort(-quality).astype(np.int32, copy=False)
1357
+
1358
+ # -------------------------------------------------------------------------
1359
+ # Build reference
1360
+ # -------------------------------------------------------------------------
1361
+ ref_count = int(max(1, min(int(ref_count), n)))
1362
+ ref_mode = "best_stack" if ref_mode == "best_stack" else "best_frame"
1363
+
1364
+ src_ref, owns_ref = _ensure_source(source_obj, cache_items=2)
1365
+ if progress_cb:
1366
+ progress_cb(0, n, f"Building reference ({ref_mode}, N={ref_count})…")
1367
+ try:
1368
+ if cfg.track_mode == "surface":
1369
+ # Surface ref must be frame 0 in roi_used coords
1370
+ ref_img = _get_frame(
1371
+ src_ref, 0,
1372
+ roi=roi_used, debayer=debayer, to_float01=True, force_rgb=bool(to_rgb),
1373
+ bayer_pattern=bpat,
1374
+ ).astype(np.float32, copy=False)
1375
+
1376
+ ref_mode = "first_frame"
1377
+ ref_count = 1
1378
+ else:
1379
+ ref_img = _build_reference(
1380
+ src_ref,
1381
+ order=order,
1382
+ roi=roi_used,
1383
+ debayer=debayer,
1384
+ to_rgb=to_rgb,
1385
+ ref_mode=ref_mode,
1386
+ ref_count=ref_count,
1387
+ bayer_pattern=bpat, # ✅ add this
1388
+ ).astype(np.float32, copy=False)
1389
+
1390
+ finally:
1391
+ if owns_ref:
1392
+ try:
1393
+ src_ref.close()
1394
+ except Exception:
1395
+ pass
1396
+
1397
+ # -------------------------------------------------------------------------
1398
+ # Autoplace APs (always)
1399
+ # -------------------------------------------------------------------------
1400
+ if progress_cb:
1401
+ progress_cb(0, n, "Placing alignment points…")
1402
+
1403
+ ap_size = int(getattr(cfg, "ap_size", 64) or 64)
1404
+ ap_centers = _autoplace_aps(
1405
+ ref_img,
1406
+ ap_size=ap_size,
1407
+ ap_spacing=int(getattr(cfg, "ap_spacing", 48)),
1408
+ ap_min_mean=float(getattr(cfg, "ap_min_mean", 0.03)),
1409
+ )
1410
+
1411
+ # -------------------------------------------------------------------------
1412
+ # Pass 2: shifts/conf
1413
+ # -------------------------------------------------------------------------
1414
+ dx = np.zeros((n,), dtype=np.float32)
1415
+ dy = np.zeros((n,), dtype=np.float32)
1416
+ conf = np.ones((n,), dtype=np.float32)
1417
+ coarse_conf: Optional[np.ndarray] = None
1418
+
1419
+ if cfg.track_mode == "off" or cv2 is None:
1420
+ return AnalyzeResult(
1421
+ frames_total=n,
1422
+ roi_used=roi_used,
1423
+ track_mode=cfg.track_mode,
1424
+ quality=quality,
1425
+ dx=dx,
1426
+ dy=dy,
1427
+ conf=conf,
1428
+ order=order,
1429
+ ref_mode=ref_mode,
1430
+ ref_count=ref_count,
1431
+ ref_image=ref_img,
1432
+ ap_centers=ap_centers,
1433
+ ap_size=ap_size,
1434
+ ap_multiscale=bool(getattr(cfg, "ap_multiscale", False)),
1435
+ coarse_conf=None,
1436
+ )
1437
+
1438
+ ref_m_full = _to_mono01(ref_img).astype(np.float32, copy=False)
1439
+ use_multiscale = bool(getattr(cfg, "ap_multiscale", False))
1440
+
1441
+ # ---- surface coarse drift (ref-locked) ----
1442
+ if cfg.track_mode == "surface":
1443
+ coarse_conf = np.zeros((n,), dtype=np.float32)
1444
+ if progress_cb:
1445
+ progress_cb(0, n, "Surface: coarse drift (ref-locked NCC+subpix)…")
1446
+
1447
+ dx_chain, dy_chain, cc_chain = _coarse_surface_ref_locked(
1448
+ source_obj,
1449
+ n=n,
1450
+ roi=roi_track,
1451
+ roi_used=roi_used, # ✅ NEW
1452
+ debayer=debayer,
1453
+ to_rgb=to_rgb,
1454
+ bayer_pattern=bpat,
1455
+ progress_cb=progress_cb,
1456
+ progress_every=25,
1457
+ down=2,
1458
+ template_size=256,
1459
+ search_radius=96,
1460
+ bandpass=True,
1461
+ workers=min(workers, 8), # coarse doesn’t need 48; 4–8 is usually ideal
1462
+ stride=16, # 8–32 typical
1463
+ )
1464
+ dx[:] = dx_chain
1465
+ dy[:] = dy_chain
1466
+ coarse_conf[:] = cc_chain
1467
+
1468
+ # ---- chunked refine ----
1469
+ idxs2 = np.arange(n, dtype=np.int32)
1470
+
1471
+ # More/smaller chunks => progress updates sooner (futures complete more frequently)
1472
+ chunk_factor = int(getattr(cfg, "progress_chunk_factor", 5)) # optional knob
1473
+ min_chunks = 5
1474
+ n_chunks2 = max(min_chunks, int(workers) * chunk_factor)
1475
+ n_chunks2 = max(1, min(int(n), n_chunks2))
1476
+
1477
+ chunks2 = np.array_split(idxs2, n_chunks2)
1478
+
1479
+ if progress_cb:
1480
+ progress_cb(0, n, "SSD Refine")
1481
+
1482
+ if cfg.track_mode == "surface":
1483
+ # FAST surface refine:
1484
+ # - use coarse dx/dy from ref-locked tracker
1485
+ # - apply coarse shift to current mono frame
1486
+ # - compute residual per-AP phase shifts (NO SEARCH)
1487
+ # - final dx/dy = coarse + median(residual)
1488
+ def _shift_chunk(chunk: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
1489
+ out_i: list[int] = []
1490
+ out_dx: list[float] = []
1491
+ out_dy: list[float] = []
1492
+ out_cf: list[float] = []
1493
+
1494
+ src, owns = _ensure_source(source_obj, cache_items=0)
1495
+ try:
1496
+ for i in chunk.tolist():
1497
+ img = _get_frame(
1498
+ src, int(i),
1499
+ roi=roi_used,
1500
+ debayer=debayer,
1501
+ to_float01=True,
1502
+ force_rgb=bool(to_rgb),
1503
+ bayer_pattern=bpat,
1504
+ )
1505
+ cur_m = _to_mono01(img).astype(np.float32, copy=False)
1506
+
1507
+ coarse_dx = float(dx[int(i)])
1508
+ coarse_dy = float(dy[int(i)])
1509
+
1510
+ # Apply coarse shift FIRST (so APs line up without any searching)
1511
+ cur_m_g = _shift_image(cur_m, coarse_dx, coarse_dy)
1512
+
1513
+ if use_multiscale:
1514
+ s2, s1, s05 = _scaled_ap_sizes(ap_size)
1515
+
1516
+ def _one_scale(s_ap: int):
1517
+ rdx, rdy, resp = _ap_phase_shifts_per_ap(
1518
+ ref_m_full, cur_m_g,
1519
+ ap_centers=ap_centers,
1520
+ ap_size=s_ap,
1521
+ max_dim=max_dim,
1522
+ )
1523
+ cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
1524
+ keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
1525
+ if not np.any(keep):
1526
+ return 0.0, 0.0, 0.25
1527
+ dx_r = float(np.median(rdx[keep]))
1528
+ dy_r = float(np.median(rdy[keep]))
1529
+ cf_r = float(np.median(cf[keep]))
1530
+ return dx_r, dy_r, cf_r
1531
+
1532
+ dx2, dy2, cf2 = _one_scale(s2)
1533
+ dx1, dy1, cf1 = _one_scale(s1)
1534
+ dx0, dy0, cf0 = _one_scale(s05)
1535
+
1536
+ w2 = max(1e-3, float(cf2)) * 1.25
1537
+ w1 = max(1e-3, float(cf1)) * 1.00
1538
+ w0 = max(1e-3, float(cf0)) * 0.85
1539
+ wsum = (w2 + w1 + w0)
1540
+
1541
+ dx_res = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
1542
+ dy_res = (w2 * dy2 + w1 * dy1 + w0 * dy0) / wsum
1543
+ cf_ap = float(np.clip((w2 * cf2 + w1 * cf1 + w0 * cf0) / wsum, 0.0, 1.0))
1544
+ else:
1545
+ rdx, rdy, resp = _ap_phase_shifts_per_ap(
1546
+ ref_m_full, cur_m_g,
1547
+ ap_centers=ap_centers,
1548
+ ap_size=ap_size,
1549
+ max_dim=max_dim,
1550
+ )
1551
+ cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
1552
+ keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
1553
+ if np.any(keep):
1554
+ dx_res = float(np.median(rdx[keep]))
1555
+ dy_res = float(np.median(rdy[keep]))
1556
+ cf_ap = float(np.median(cf[keep]))
1557
+ else:
1558
+ dx_res, dy_res, cf_ap = 0.0, 0.0, 0.25
1559
+
1560
+ # Final = coarse + residual (residual is relative to coarse-shifted frame)
1562
+ dx_i = float(coarse_dx + dx_res)
1563
+ dy_i = float(coarse_dy + dy_res)
1564
+
1565
+ # Final lock-in refinement: minimize (ref-cur)^2 on gradients in a tiny window
1566
+ # NOTE: pass *unshifted* cur_m with the current dx_i/dy_i estimate
1567
+ dxr, dyr, c_ssd = _refine_shift_ssd(
1568
+ ref_m_full, cur_m, dx_i, dy_i,
1569
+ radius=5, crop=0.80,
1570
+ bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)),
1571
+ )
1572
+ dx_i += float(dxr)
1573
+ dy_i += float(dyr)
1574
+
1575
+ # Confidence: combine coarse + AP, then optionally nudge with SSD
1576
+ cc = float(coarse_conf[int(i)]) if coarse_conf is not None else 0.5
1577
+ cf_i = float(np.clip(0.60 * cc + 0.40 * float(cf_ap), 0.0, 1.0))
1578
+ cf_i = float(np.clip(0.85 * cf_i + 0.15 * float(c_ssd), 0.05, 1.0))
1579
+
1580
+ out_i.append(int(i))
1581
+ out_dx.append(dx_i)
1582
+ out_dy.append(dy_i)
1583
+ out_cf.append(cf_i)
1584
+ finally:
1585
+ if owns:
1586
+ try:
1587
+ src.close()
1588
+ except Exception:
1589
+ pass
1590
+
1591
+ return (
1592
+ np.asarray(out_i, np.int32),
1593
+ np.asarray(out_dx, np.float32),
1594
+ np.asarray(out_dy, np.float32),
1595
+ np.asarray(out_cf, np.float32),
1596
+ )
1597
+
1598
+ else:
1599
+ # planetary: centroid tracking (same as viewer) for GLOBAL dx/dy/conf
1600
+ # APs are still computed and used later by stack_ser for local_warp residuals.
1601
+ tracker = PlanetaryTracker(
1602
+ smooth_sigma=float(getattr(cfg, "planet_smooth_sigma", smooth_sigma)),
1603
+ thresh_pct=float(getattr(cfg, "planet_thresh_pct", thresh_pct)),
1604
+ )
1605
+
1606
+ # IMPORTANT: reference center is computed from the SAME reference image that Analyze chose
1607
+ ref_cx, ref_cy, ref_cc = tracker.compute_center(ref_img)
1608
+ if ref_cc <= 0.0:
1609
+ # fallback: center of ROI
1610
+ mref = _to_mono01(ref_img)
1611
+ ref_cx = float(mref.shape[1] * 0.5)
1612
+ ref_cy = float(mref.shape[0] * 0.5)
1613
+
1614
+ ref_center = (float(ref_cx), float(ref_cy))
1615
+ ref_m_full = _to_mono01(ref_img).astype(np.float32, copy=False)
1616
+
1617
+ def _shift_chunk(chunk: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
1618
+ out_i: list[int] = []
1619
+ out_dx: list[float] = []
1620
+ out_dy: list[float] = []
1621
+ out_cf: list[float] = []
1622
+
1623
+ src, owns = _ensure_source(source_obj, cache_items=0)
1624
+ try:
1625
+ for i in chunk.tolist():
1626
+ img = _get_frame(
1627
+ src, int(i),
1628
+ roi=roi_used,
1629
+ debayer=debayer,
1630
+ to_float01=True,
1631
+ force_rgb=bool(to_rgb),
1632
+ bayer_pattern=bpat,
1633
+ )
1634
+
1635
+ dx_i, dy_i, cf_i = tracker.shift_to_ref(img, ref_center)
1636
+
1637
+ if float(cf_i) >= 0.25:
1638
+ cur_m = _to_mono01(img).astype(np.float32, copy=False)
1639
+ dxr, dyr, c_ssd = _refine_shift_ssd(
1640
+ ref_m_full, cur_m, dx_i, dy_i,
1641
+ radius=5, crop=0.80,
1642
+ bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)),
1643
+ )
1644
+
1645
+ dx_i = float(dx_i) + dxr
1646
+ dy_i = float(dy_i) + dyr
1647
+ cf_i = float(np.clip(0.85 * float(cf_i) + 0.15 * c_ssd, 0.05, 1.0))
1648
+ out_i.append(int(i))
1649
+ out_dx.append(float(dx_i))
1650
+ out_dy.append(float(dy_i))
1651
+ out_cf.append(float(cf_i))
1652
+ finally:
1653
+ if owns:
1654
+ try:
1655
+ src.close()
1656
+ except Exception:
1657
+ pass
1658
+
1659
+ return (
1660
+ np.asarray(out_i, np.int32),
1661
+ np.asarray(out_dx, np.float32),
1662
+ np.asarray(out_dy, np.float32),
1663
+ np.asarray(out_cf, np.float32),
1664
+ )
1665
+
1666
+
1667
+ done_ct = 0
1668
+ with ThreadPoolExecutor(max_workers=workers) as ex:
1669
+ futs = [ex.submit(_shift_chunk, c) for c in chunks2 if c.size > 0]
1670
+ for fut in as_completed(futs):
1671
+ ii, ddx, ddy, ccf = fut.result()
1672
+ dx[ii] = ddx
1673
+ dy[ii] = ddy
1674
+ conf[ii] = np.clip(ccf, 0.05, 1.0).astype(np.float32, copy=False)
1675
+
1676
+ done_ct += int(ii.size)
1677
+ if progress_cb:
1678
+ progress_cb(done_ct, n, "SSD Refine")
1679
+
1680
+ if cfg.track_mode == "surface":
1681
+ _print_surface_debug(dx=dx, dy=dy, conf=conf, coarse_conf=coarse_conf, floor=0.05, prefix="[SER][Surface]")
1682
+
1683
+ return AnalyzeResult(
1684
+ frames_total=n,
1685
+ roi_used=roi_used,
1686
+ track_mode=cfg.track_mode,
1687
+ quality=quality,
1688
+ dx=dx,
1689
+ dy=dy,
1690
+ conf=conf,
1691
+ order=order,
1692
+ ref_mode=ref_mode,
1693
+ ref_count=ref_count,
1694
+ ref_image=ref_img,
1695
+ ap_centers=ap_centers,
1696
+ ap_size=ap_size,
1697
+ ap_multiscale=use_multiscale,
1698
+ coarse_conf=coarse_conf,
1699
+ )
1700
+
1701
+
1702
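# A minimal sketch, assuming the AnalyzeResult convention used above (shifting frame i by
# (dx[i], dy[i]) aligns it to the reference): how the global shifts could be consumed for a
# naive shift-and-add.  The real pipeline (stack_ser) additionally uses the APs for local
# warping; the frame list here is a stand-in for frames read via _get_frame on roi_used.
import numpy as np
import cv2

def naive_shift_and_add(frames, dx, dy, conf, conf_min=0.25):
    acc, used = None, 0
    for img, sx, sy, c in zip(frames, dx, dy, conf):
        if c < conf_min:
            continue                                        # skip low-confidence frames
        h, w = img.shape[:2]
        m = np.float32([[1, 0, sx], [0, 1, sy]])            # pure translation by (dx, dy)
        warped = cv2.warpAffine(img, m, (w, h),
                                flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REPLICATE)
        acc = warped if acc is None else acc + warped
        used += 1
    return acc / max(used, 1)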
+ def realign_ser(
1703
+ cfg: SERStackConfig,
1704
+ analysis: AnalyzeResult,
1705
+ *,
1706
+ debayer: bool = True,
1707
+ to_rgb: bool = False,
1708
+ max_dim: int = 512,
1709
+ progress_cb=None,
1710
+ bayer_pattern: Optional[str] = None,
1711
+ workers: Optional[int] = None,
1712
+ ) -> AnalyzeResult:
1713
+ """
1714
+ Recompute only dx/dy/conf, reusing analysis.ref_image and analysis.ap_centers.
+ Quality, order and ref_image are left unchanged.
+
+ Surface mode:
+ - recompute the ref-locked coarse drift on roi_track
+ - refine with per-AP phase residuals on top of the coarse shift (outlier rejection + final SSD lock-in)
+ """
1721
+ bpat = bayer_pattern or _cfg_bayer_pattern(cfg)
1722
+
1723
+ if analysis is None:
1724
+ raise ValueError("analysis is None")
1725
+ if analysis.ref_image is None:
1726
+ raise ValueError("analysis.ref_image is missing")
1727
+
1728
+ source_obj = _cfg_get_source(cfg)
1729
+ if not source_obj:
1730
+ raise ValueError("SERStackConfig.source/ser_path is empty")
1731
+
1732
+ n = int(analysis.frames_total)
1733
+ roi_used = analysis.roi_used
1734
+ ref_img = analysis.ref_image
1735
+
1736
+ if cfg.track_mode == "off" or cv2 is None:
1737
+ analysis.dx = np.zeros((n,), dtype=np.float32)
1738
+ analysis.dy = np.zeros((n,), dtype=np.float32)
1739
+ analysis.conf = np.ones((n,), dtype=np.float32)
1740
+ if hasattr(analysis, "coarse_conf"):
1741
+ analysis.coarse_conf = None
1742
+ return analysis
1743
+
1744
+ # Ensure AP centers exist
1745
+ ap_centers = getattr(analysis, "ap_centers", None)
1746
+ if ap_centers is None or np.asarray(ap_centers).size == 0:
1747
+ ap_centers = _autoplace_aps(
1748
+ ref_img,
1749
+ ap_size=int(getattr(cfg, "ap_size", 64)),
1750
+ ap_spacing=int(getattr(cfg, "ap_spacing", 48)),
1751
+ ap_min_mean=float(getattr(cfg, "ap_min_mean", 0.03)),
1752
+ )
1753
+ analysis.ap_centers = ap_centers
1754
+
1755
+ if workers is None:
1756
+ cpu = os.cpu_count() or 4
1757
+ workers = max(1, min(cpu, 48))
1758
+
1759
+ if cv2 is not None:
1760
+ try:
1761
+ cv2.setNumThreads(1)
1762
+ except Exception:
1763
+ pass
1764
+
1765
+ # Need meta for ROI expansion (surface tracking)
1766
+ src0, owns0 = _ensure_source(source_obj, cache_items=2)
1767
+ try:
1768
+ meta = src0.meta
1769
+ src_w = int(meta.width)
1770
+ src_h = int(meta.height)
1771
+ finally:
1772
+ if owns0:
1773
+ try:
1774
+ src0.close()
1775
+ except Exception:
1776
+ pass
1777
+
1778
+ def _surface_tracking_roi() -> Optional[Tuple[int, int, int, int]]:
1779
+ if roi_used is None:
1780
+ return None
1781
+ margin = int(getattr(cfg, "surface_track_margin", 256))
1782
+ x, y, w, h = [int(v) for v in roi_used]
1783
+ x0 = max(0, x - margin)
1784
+ y0 = max(0, y - margin)
1785
+ x1 = min(src_w, x + w + margin)
1786
+ y1 = min(src_h, y + h + margin)
1787
+ return _clamp_roi_in_bounds((x0, y0, x1 - x0, y1 - y0), src_w, src_h)
1788
+
1789
+ roi_track = _surface_tracking_roi() if cfg.track_mode == "surface" else roi_used
1790
+
1791
+ # ---- chunked refine ----
1792
+ idxs2 = np.arange(n, dtype=np.int32)
1793
+
1794
+ # More/smaller chunks => progress updates sooner (futures complete more frequently)
1795
+ chunk_factor = int(getattr(cfg, "progress_chunk_factor", 5)) # optional knob
1796
+ min_chunks = 5
1797
+ n_chunks2 = max(min_chunks, int(workers) * chunk_factor)
1798
+ n_chunks2 = max(1, min(int(n), n_chunks2))
1799
+
1800
+ chunks2 = np.array_split(idxs2, n_chunks2)
1801
+
1802
+ dx = np.zeros((n,), dtype=np.float32)
1803
+ dy = np.zeros((n,), dtype=np.float32)
1804
+ conf = np.ones((n,), dtype=np.float32)
1805
+
1806
+ ref_m = _to_mono01(ref_img).astype(np.float32, copy=False)
1807
+
1808
+ ap_size = int(getattr(cfg, "ap_size", 64) or 64)
1809
+ use_multiscale = bool(getattr(cfg, "ap_multiscale", False))
1810
+
1811
+ coarse_conf: Optional[np.ndarray] = None
1812
+ if cfg.track_mode == "surface":
1813
+ coarse_conf = np.zeros((n,), dtype=np.float32)
1814
+ if progress_cb:
1815
+ progress_cb(0, n, "Surface: coarse drift (ref-locked NCC+subpix)…")
1816
+
1817
+ dx_chain, dy_chain, cc_chain = _coarse_surface_ref_locked(
1818
+ source_obj,
1819
+ n=n,
1820
+ roi=roi_track,
1821
+ roi_used=roi_used,
1822
+ debayer=debayer,
1823
+ to_rgb=to_rgb,
1824
+ bayer_pattern=bpat,
1825
+ progress_cb=progress_cb,
1826
+ progress_every=25,
1827
+ down=2,
1828
+ template_size=256,
1829
+ search_radius=96,
1830
+ bandpass=True,
1831
+ workers=min(workers, 8), # coarse doesn’t need 48; 4–8 is usually ideal
1832
+ stride=16, # 8–32 typical
1833
+ )
1834
+
1835
+ dx[:] = dx_chain
1836
+ dy[:] = dy_chain
1837
+ coarse_conf[:] = cc_chain
1838
+
1839
+ if progress_cb:
1840
+ progress_cb(0, n, "SSD Refine")
1841
+
1842
+ if cfg.track_mode == "surface":
1843
+ def _shift_chunk(chunk: np.ndarray):
1844
+ out_i: list[int] = []
1845
+ out_dx: list[float] = []
1846
+ out_dy: list[float] = []
1847
+ out_cf: list[float] = []
1848
+ out_cc: list[float] = []
1849
+
1850
+ src, owns = _ensure_source(source_obj, cache_items=0)
1851
+ try:
1852
+ for i in chunk.tolist():
1853
+ img = _get_frame(
1854
+ src, int(i),
1855
+ roi=roi_used,
1856
+ debayer=debayer,
1857
+ to_float01=True,
1858
+ force_rgb=bool(to_rgb),
1859
+ bayer_pattern=bpat,
1860
+ )
1861
+ cur_m = _to_mono01(img).astype(np.float32, copy=False)
1862
+
1863
+ coarse_dx = float(dx[int(i)])
1864
+ coarse_dy = float(dy[int(i)])
1865
+ cc = float(coarse_conf[int(i)]) if coarse_conf is not None else 0.5
1866
+
1867
+ # Apply coarse shift first
1868
+ cur_m_g = _shift_image(cur_m, coarse_dx, coarse_dy)
1869
+
1870
+ if use_multiscale:
1871
+ s2, s1, s05 = _scaled_ap_sizes(ap_size)
1872
+
1873
+ def _one_scale(s_ap: int):
1874
+ rdx, rdy, resp = _ap_phase_shifts_per_ap(
1875
+ ref_m, cur_m_g,
1876
+ ap_centers=ap_centers,
1877
+ ap_size=s_ap,
1878
+ max_dim=max_dim,
1879
+ )
1880
+ cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
1881
+ keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
1882
+ if not np.any(keep):
1883
+ return 0.0, 0.0, 0.25
1884
+ return (
1885
+ float(np.median(rdx[keep])),
1886
+ float(np.median(rdy[keep])),
1887
+ float(np.median(cf[keep])),
1888
+ )
1889
+
1890
+ dx2, dy2, cf2 = _one_scale(s2)
1891
+ dx1, dy1, cf1 = _one_scale(s1)
1892
+ dx0, dy0, cf0 = _one_scale(s05)
1893
+
1894
+ w2 = max(1e-3, float(cf2)) * 1.25
1895
+ w1 = max(1e-3, float(cf1)) * 1.00
1896
+ w0 = max(1e-3, float(cf0)) * 0.85
1897
+ wsum = (w2 + w1 + w0)
1898
+
1899
+ dx_res = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
1900
+ dy_res = (w2 * dy2 + w1 * dy1 + w0 * dy0) / wsum
1901
+ cf_ap = float(np.clip((w2 * cf2 + w1 * cf1 + w0 * cf0) / wsum, 0.0, 1.0))
1902
+ else:
1903
+ rdx, rdy, resp = _ap_phase_shifts_per_ap(
1904
+ ref_m, cur_m_g,
1905
+ ap_centers=ap_centers,
1906
+ ap_size=ap_size,
1907
+ max_dim=max_dim,
1908
+ )
1909
+ cf = np.clip(resp.astype(np.float32, copy=False), 0.0, 1.0)
1910
+ keep = _reject_ap_outliers(rdx, rdy, cf, z=3.5)
1911
+ if np.any(keep):
1912
+ dx_res = float(np.median(rdx[keep]))
1913
+ dy_res = float(np.median(rdy[keep]))
1914
+ cf_ap = float(np.median(cf[keep]))
1915
+ else:
1916
+ dx_res, dy_res, cf_ap = 0.0, 0.0, 0.25
1917
+
1918
+ # Final = coarse + residual (residual is relative to coarse-shifted frame)
1919
+ dx_i = float(coarse_dx + dx_res)
1920
+ dy_i = float(coarse_dy + dy_res)
1921
+
1922
+ # Final lock-in refinement: minimize (ref-cur)^2 on gradients in a tiny window
1923
+ # NOTE: pass *unshifted* cur_m with the current dx_i/dy_i estimate
1924
+ dxr, dyr, c_ssd = _refine_shift_ssd(ref_m, cur_m, dx_i, dy_i, radius=5, crop=0.80, bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)))
1925
+ dx_i += float(dxr)
1926
+ dy_i += float(dyr)
1927
+
1928
+ # Confidence: combine coarse + AP, then nudge with the SSD response (cc was read above)
+ cf_i = float(np.clip(0.60 * cc + 0.40 * float(cf_ap), 0.0, 1.0))
+ cf_i = float(np.clip(0.85 * cf_i + 0.15 * float(c_ssd), 0.05, 1.0))
+
+ out_i.append(int(i))
1935
+ out_dx.append(dx_i)
1936
+ out_dy.append(dy_i)
1937
+ out_cf.append(cf_i)
1938
+ out_cc.append(float(cc))
1939
+ finally:
1940
+ if owns:
1941
+ try:
1942
+ src.close()
1943
+ except Exception:
1944
+ pass
1945
+
1946
+ return (
1947
+ np.asarray(out_i, np.int32),
1948
+ np.asarray(out_dx, np.float32),
1949
+ np.asarray(out_dy, np.float32),
1950
+ np.asarray(out_cf, np.float32),
1951
+ np.asarray(out_cc, np.float32),
1952
+ )
1953
+
1954
+ else:
1955
+ # planetary: centroid tracking (same as viewer)
1956
+ tracker = PlanetaryTracker(
1957
+ smooth_sigma=float(getattr(cfg, "planet_smooth_sigma", 1.5)),
1958
+ thresh_pct=float(getattr(cfg, "planet_thresh_pct", 92.0)),
1959
+ )
1960
+
1961
+ # Reference center comes from analysis.ref_image (same anchor as analyze_ser)
1962
+ ref_cx, ref_cy, ref_cc = tracker.compute_center(ref_img)
1963
+ if ref_cc <= 0.0:
1964
+ mref = _to_mono01(ref_img)
1965
+ ref_cx = float(mref.shape[1] * 0.5)
1966
+ ref_cy = float(mref.shape[0] * 0.5)
1967
+
1968
+ ref_center = (float(ref_cx), float(ref_cy))
1969
+ ref_m_full = _to_mono01(ref_img).astype(np.float32, copy=False)
1970
+
1971
+ def _shift_chunk(chunk: np.ndarray):
1972
+ out_i: list[int] = []
1973
+ out_dx: list[float] = []
1974
+ out_dy: list[float] = []
1975
+ out_cf: list[float] = []
1976
+
1977
+ src, owns = _ensure_source(source_obj, cache_items=0)
1978
+ try:
1979
+ for i in chunk.tolist():
1980
+ img = _get_frame(
1981
+ src, int(i),
1982
+ roi=roi_used,
1983
+ debayer=debayer,
1984
+ to_float01=True,
1985
+ force_rgb=bool(to_rgb),
1986
+ bayer_pattern=bpat,
1987
+ )
1988
+
1989
+ dx_i, dy_i, cf_i = tracker.shift_to_ref(img, ref_center)
1990
+
1991
+ if float(cf_i) >= 0.25:
1992
+ cur_m = _to_mono01(img).astype(np.float32, copy=False)
1993
+ dxr, dyr, c_ssd = _refine_shift_ssd(ref_m_full, cur_m, float(dx_i), float(dy_i), radius=2, crop=0.80, bruteforce=bool(getattr(cfg, "ssd_refine_bruteforce", False)))
1994
+ dx_i = float(dx_i) + dxr
1995
+ dy_i = float(dy_i) + dyr
1996
+ cf_i = float(np.clip(0.85 * float(cf_i) + 0.15 * c_ssd, 0.05, 1.0))
1997
+ out_i.append(int(i))
1998
+ out_dx.append(float(dx_i))
1999
+ out_dy.append(float(dy_i))
2000
+ out_cf.append(float(cf_i))
2001
+
2002
+ finally:
2003
+ if owns:
2004
+ try:
2005
+ src.close()
2006
+ except Exception:
2007
+ pass
2008
+
2009
+ return (
2010
+ np.asarray(out_i, np.int32),
2011
+ np.asarray(out_dx, np.float32),
2012
+ np.asarray(out_dy, np.float32),
2013
+ np.asarray(out_cf, np.float32),
2014
+ )
2015
+
2016
+
2017
+ done_ct = 0
2018
+ with ThreadPoolExecutor(max_workers=workers) as ex:
2019
+ futs = [ex.submit(_shift_chunk, c) for c in chunks2 if c.size > 0]
2020
+ for fut in as_completed(futs):
2021
+ if cfg.track_mode == "surface":
2022
+ ii, ddx, ddy, ccf, ccc = fut.result()
2023
+ if coarse_conf is not None:
2024
+ coarse_conf[ii] = ccc
2025
+ else:
2026
+ ii, ddx, ddy, ccf = fut.result()
2027
+
2028
+ dx[ii] = ddx
2029
+ dy[ii] = ddy
2030
+ conf[ii] = np.clip(ccf, 0.05, 1.0).astype(np.float32, copy=False)
2031
+
2032
+ done_ct += int(ii.size)
2033
+ if progress_cb:
2034
+ progress_cb(done_ct, n, "SSD Refine")
2035
+
2036
+ analysis.dx = dx
2037
+ analysis.dy = dy
2038
+ analysis.conf = conf
2039
+ if hasattr(analysis, "coarse_conf"):
2040
+ analysis.coarse_conf = coarse_conf
2041
+
2042
+ if cfg.track_mode == "surface":
2043
+ _print_surface_debug(dx=dx, dy=dy, conf=conf, coarse_conf=coarse_conf, floor=0.05, prefix="[SER][Surface][realign]")
2044
+
2045
+ return analysis
2046
+
2047
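# A minimal sketch of a progress callback compatible with the (done, total, message)
# convention used by analyze_ser()/realign_ser() above, e.g. progress_cb(done_ct, n, "SSD Refine").
# console_progress is a hypothetical helper name, not part of the package.
import sys

def console_progress(done: int, total: int, message: str = "") -> None:
    pct = 100.0 * done / max(total, 1)
    sys.stdout.write(f"\r{message:<40s} {done}/{total} ({pct:5.1f}%)")
    sys.stdout.flush()
    if done >= total:
        sys.stdout.write("\n")

# usage (cfg/analysis built elsewhere): realign_ser(cfg, analysis, progress_cb=console_progress)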
+ def _autoplace_aps(ref_img01: np.ndarray, ap_size: int, ap_spacing: int, ap_min_mean: float) -> np.ndarray:
2048
+ """
2049
+ Return AP centers as int32 array of shape (M,2) with columns (cx, cy) in ROI coords.
2050
+ We grid-scan by spacing and keep patches whose mean brightness exceeds ap_min_mean.
2051
+ """
2052
+ m = _to_mono01(ref_img01).astype(np.float32, copy=False)
2053
+ H, W = m.shape[:2]
2054
+ s = int(max(16, ap_size))
2055
+ step = int(max(4, ap_spacing))
2056
+
2057
+ half = s // 2
2058
+ xs = list(range(half, max(half + 1, W - half), step))
2059
+ ys = list(range(half, max(half + 1, H - half), step))
2060
+
2061
+ pts = []
2062
+ for cy in ys:
2063
+ y0 = cy - half
2064
+ y1 = y0 + s
2065
+ if y0 < 0 or y1 > H:
2066
+ continue
2067
+ for cx in xs:
2068
+ x0 = cx - half
2069
+ x1 = x0 + s
2070
+ if x0 < 0 or x1 > W:
2071
+ continue
2072
+ patch = m[y0:y1, x0:x1]
2073
+ if float(patch.mean()) >= float(ap_min_mean):
2074
+ pts.append((cx, cy))
2075
+
2076
+ if not pts:
2077
+ # absolute fallback: a single AP at the ROI center (degenerates to single-point alignment)
2078
+ pts = [(W // 2, H // 2)]
2079
+
2080
+ return np.asarray(pts, dtype=np.int32)
2081
+
2082
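# A minimal usage sketch for _autoplace_aps() on a synthetic gradient: only patches whose
# mean exceeds ap_min_mean receive alignment points, so the dark left edge is skipped.
# Assumes _to_mono01() passes an already-mono float image through unchanged.
import numpy as np

def _demo_ap_placement() -> np.ndarray:
    demo = np.tile(np.linspace(0.0, 0.2, 512, dtype=np.float32), (512, 1))  # dark -> bright, left to right
    centers = _autoplace_aps(demo, ap_size=64, ap_spacing=48, ap_min_mean=0.03)
    # centers is an (M, 2) int32 array of (cx, cy) in ROI coords; the smallest kept cx sits
    # roughly where a 64 px patch mean first crosses 0.03.
    return centers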
+ def _scaled_ap_sizes(base: int) -> tuple[int, int, int]:
2083
+ b = int(base)
2084
+ s2 = int(round(b * 2.0))
2085
+ s1 = int(round(b * 1.0))
2086
+ s05 = int(round(b * 0.5))
2087
+ # clamp to sane limits
2088
+ s2 = max(16, min(256, s2))
2089
+ s1 = max(16, min(256, s1))
2090
+ s05 = max(16, min(256, s05))
2091
+ return s2, s1, s05
2092
+
2093
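# Quick sanity check of the multi-scale AP sizes above (2x / 1x / 0.5x of base, clamped to [16, 256]):
assert _scaled_ap_sizes(64) == (128, 64, 32)
assert _scaled_ap_sizes(200) == (256, 200, 100)   # the 2x size is clamped at 256
assert _scaled_ap_sizes(20) == (40, 20, 16)       # the 0.5x size is clamped at 16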
+ def _dense_field_from_ap_shifts(
2094
+ H: int, W: int,
2095
+ ap_centers: np.ndarray, # (M,2)
2096
+ ap_dx: np.ndarray, # (M,)
2097
+ ap_dy: np.ndarray, # (M,)
2098
+ ap_cf: np.ndarray, # (M,)
2099
+ *,
2100
+ grid: int = 32, # coarse grid resolution (32 or 48 are good)
2101
+ power: float = 2.0,
2102
+ conf_floor: float = 0.15,
2103
+ radius: float | None = None, # optional clamp in pixels (ROI coords)
2104
+ ) -> tuple[np.ndarray, np.ndarray]:
2105
+ """
2106
+ Returns dense (dx_field, dy_field) as float32 arrays (H,W) in ROI pixels.
2107
+ Computed on coarse grid then upsampled.
2108
+ """
2109
+ # coarse grid points
2110
+ gh = max(4, int(grid))
2111
+ gw = max(4, int(round(grid * (W / max(1, H)))))
2112
+
2113
+ ys = np.linspace(0, H - 1, gh, dtype=np.float32)
2114
+ xs = np.linspace(0, W - 1, gw, dtype=np.float32)
2115
+ gx, gy = np.meshgrid(xs, ys) # (gh,gw)
2116
+
2117
+ pts = ap_centers.astype(np.float32)
2118
+ px = pts[:, 0].reshape(-1, 1, 1) # (M,1,1)
2119
+ py = pts[:, 1].reshape(-1, 1, 1) # (M,1,1)
2120
+
2121
+ cf = np.maximum(ap_cf.astype(np.float32), 0.0)
2122
+ good = cf >= float(conf_floor)
2123
+
2124
+ if not np.any(good):
2125
+ dxg = np.zeros((gh, gw), np.float32)
2126
+ dyg = np.zeros((gh, gw), np.float32)
2127
+ else:
2128
+ px = px[good]
2129
+ py = py[good]
2130
+ dx = ap_dx[good].astype(np.float32).reshape(-1, 1, 1)
2131
+ dy = ap_dy[good].astype(np.float32).reshape(-1, 1, 1)
2132
+ cw = cf[good].astype(np.float32).reshape(-1, 1, 1)
2133
+
2134
+ dxp = px - gx[None, :, :] # (M,gh,gw)
2135
+ dyp = py - gy[None, :, :] # (M,gh,gw)
2136
+ d2 = dxp * dxp + dyp * dyp # (M,gh,gw)
2137
+
2138
+ if radius is not None:
2139
+ r2 = float(radius) * float(radius)
2140
+ far = d2 > r2
2141
+ else:
2142
+ far = None
2143
+
2144
+ w = 1.0 / np.maximum(d2, 1.0) ** (power * 0.5)
2145
+ w *= cw
2146
+
2147
+ if far is not None:
2148
+ w = np.where(far, 0.0, w)
2149
+
2150
+ wsum = np.sum(w, axis=0) # (gh,gw)
2151
+
2152
+ dxg = np.sum(w * dx, axis=0) / np.maximum(wsum, 1e-6)
2153
+ dyg = np.sum(w * dy, axis=0) / np.maximum(wsum, 1e-6)
2154
+
2155
+
2156
+ # upsample to full res
2157
+ dx_field = cv2.resize(dxg, (W, H), interpolation=cv2.INTER_CUBIC).astype(np.float32, copy=False)
2158
+ dy_field = cv2.resize(dyg, (W, H), interpolation=cv2.INTER_CUBIC).astype(np.float32, copy=False)
2159
+ return dx_field, dy_field
2160
+
2161
+ def _warp_by_dense_field(img01: np.ndarray, dx_field: np.ndarray, dy_field: np.ndarray) -> np.ndarray:
2162
+ """
2163
+ img01 (H,W) or (H,W,3)
2164
+ dx_field/dy_field are (H,W) in pixels: shifting cur by (dx,dy) aligns to ref.
2165
+ """
2166
+ H, W = dx_field.shape
2167
+ # remap wants map_x/map_y = source sampling coordinates
2168
+ # If we want output aligned-to-ref, we sample from cur at (x - dx, y - dy)
2169
+ xs, ys = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32))
2170
+ map_x = xs - dx_field
2171
+ map_y = ys - dy_field
2172
+
2173
+ # cv2.remap handles mono (H,W) and color (H,W,3) inputs the same way, so no branch is needed
+ return cv2.remap(img01, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
2177
+
2178
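# A minimal sketch, assuming OpenCV is available, of how the two helpers above combine:
# interpolate sparse per-AP residuals into a dense field, then resample a frame onto the
# reference grid.  _demo_local_warp and its toy numbers are illustrative only.
import numpy as np

def _demo_local_warp() -> np.ndarray:
    H, W = 480, 640
    ap_centers = np.array([[100, 100], [540, 100], [320, 380]], dtype=np.int32)  # (cx, cy)
    ap_dx = np.array([+1.5, -0.5, +0.2], dtype=np.float32)   # per-AP x residuals (px)
    ap_dy = np.array([-0.8, +0.3, +0.1], dtype=np.float32)   # per-AP y residuals (px)
    ap_cf = np.array([0.9, 0.7, 0.4], dtype=np.float32)      # per-AP confidence weights
    dx_field, dy_field = _dense_field_from_ap_shifts(H, W, ap_centers, ap_dx, ap_dy, ap_cf, grid=32)
    frame = np.random.rand(H, W).astype(np.float32)          # stand-in for a mono frame in [0, 1]
    return _warp_by_dense_field(frame, dx_field, dy_field)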
+ def _ap_phase_shift(
2179
+ ref_m: np.ndarray,
2180
+ cur_m: np.ndarray,
2181
+ ap_centers: np.ndarray,
2182
+ ap_size: int,
2183
+ max_dim: int,
2184
+ ) -> tuple[float, float, float]:
2185
+ """
2186
+ Compute a robust global shift from multiple local AP shifts.
2187
+ Returns (dx, dy, conf) in ROI pixel units.
2188
+ conf is median of per-AP phase correlation responses.
2189
+ """
2190
+ s = int(max(16, ap_size))
2191
+ half = s // 2
2192
+
2193
+ H, W = ref_m.shape[:2]
2194
+ dxs = []
2195
+ dys = []
2196
+ resps = []
2197
+
2198
+ # patches are downsampled individually per AP; fast enough in practice since M is usually modest
2199
+ for (cx, cy) in ap_centers.tolist():
2200
+ x0 = cx - half
2201
+ y0 = cy - half
2202
+ x1 = x0 + s
2203
+ y1 = y0 + s
2204
+ if x0 < 0 or y0 < 0 or x1 > W or y1 > H:
2205
+ continue
2206
+
2207
+ ref_patch = ref_m[y0:y1, x0:x1]
2208
+ cur_patch = cur_m[y0:y1, x0:x1]
2209
+
2210
+ rp = _downsample_mono01(ref_patch, max_dim=max_dim)
2211
+ cp = _downsample_mono01(cur_patch, max_dim=max_dim)
2212
+
2213
+ if rp.shape != cp.shape:
2214
+ cp = cv2.resize(cp, (rp.shape[1], rp.shape[0]), interpolation=cv2.INTER_AREA)
2215
+
2216
+ sdx, sdy, resp = _phase_corr_shift(rp, cp)
2217
+
2218
+ # scale back to ROI pixels (patch pixels -> ROI pixels)
2219
+ sx = float(s) / float(rp.shape[1])
2220
+ sy = float(s) / float(rp.shape[0])
2221
+
2222
+ dxs.append(float(sdx * sx))
2223
+ dys.append(float(sdy * sy))
2224
+ resps.append(float(resp))
2225
+
2226
+ if not dxs:
2227
+ return 0.0, 0.0, 0.5
2228
+
2229
+ dx_med = float(np.median(np.asarray(dxs, np.float32)))
2230
+ dy_med = float(np.median(np.asarray(dys, np.float32)))
2231
+ conf = float(np.median(np.asarray(resps, np.float32)))
2232
+
2233
+ return dx_med, dy_med, conf
2234
+
2235
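# Conceptual stand-in for the per-patch measurement: OpenCV's phase correlation returns a
# sub-pixel translation between two equally sized float patches plus a peak response.  The
# internal _phase_corr_shift() is assumed to do something along these lines (with its own
# windowing and sign convention), so treat patch_shift() below as illustrative only.
import numpy as np
import cv2

def patch_shift(ref_patch: np.ndarray, cur_patch: np.ndarray) -> tuple[float, float, float]:
    win = cv2.createHanningWindow((ref_patch.shape[1], ref_patch.shape[0]), cv2.CV_32F)
    (sdx, sdy), resp = cv2.phaseCorrelate(ref_patch.astype(np.float32),
                                          cur_patch.astype(np.float32), win)
    # resp is a peak-sharpness measure, usable as a rough confidence in [0, 1]
    return float(sdx), float(sdy), float(resp)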
+ def _ap_phase_shifts_per_ap(
2236
+ ref_m: np.ndarray,
2237
+ cur_m: np.ndarray,
2238
+ ap_centers: np.ndarray,
2239
+ ap_size: int,
2240
+ max_dim: int,
2241
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
2242
+ """
2243
+ Per-AP phase correlation shifts (NO SEARCH).
2244
+ Returns arrays (ap_dx, ap_dy, ap_resp) in ROI pixels, where shifting cur by (dx,dy)
2245
+ aligns it to ref for each AP.
2246
+ """
2247
+ s = int(max(16, ap_size))
2248
+ half = s // 2
2249
+
2250
+ H, W = ref_m.shape[:2]
2251
+ M = int(ap_centers.shape[0])
2252
+
2253
+ ap_dx = np.zeros((M,), np.float32)
2254
+ ap_dy = np.zeros((M,), np.float32)
2255
+ ap_resp = np.zeros((M,), np.float32)
2256
+
2257
+ if cv2 is None or M == 0:
2258
+ ap_resp[:] = 0.5
2259
+ return ap_dx, ap_dy, ap_resp
2260
+
2261
+ for j, (cx, cy) in enumerate(ap_centers.tolist()):
2262
+ x0 = int(cx - half)
2263
+ y0 = int(cy - half)
2264
+ x1 = x0 + s
2265
+ y1 = y0 + s
2266
+ if x0 < 0 or y0 < 0 or x1 > W or y1 > H:
2267
+ ap_resp[j] = 0.0
2268
+ continue
2269
+
2270
+ ref_patch = ref_m[y0:y1, x0:x1]
2271
+ cur_patch = cur_m[y0:y1, x0:x1]
2272
+
2273
+ rp = _downsample_mono01(ref_patch, max_dim=max_dim)
2274
+ cp = _downsample_mono01(cur_patch, max_dim=max_dim)
2275
+
2276
+ if rp.shape != cp.shape and cv2 is not None:
2277
+ cp = cv2.resize(cp, (rp.shape[1], rp.shape[0]), interpolation=cv2.INTER_AREA)
2278
+
2279
+ sdx, sdy, resp = _phase_corr_shift(rp, cp)
2280
+
2281
+ # scale to ROI pixels
2282
+ sx = float(s) / float(rp.shape[1])
2283
+ sy = float(s) / float(rp.shape[0])
2284
+
2285
+ ap_dx[j] = float(sdx * sx)
2286
+ ap_dy[j] = float(sdy * sy)
2287
+ ap_resp[j] = float(resp)
2288
+
2289
+ return ap_dx, ap_dy, ap_resp
2290
+
2291
+
2292
+ def _ap_phase_shift_multiscale(
2293
+ ref_m: np.ndarray,
2294
+ cur_m: np.ndarray,
2295
+ ap_centers: np.ndarray,
2296
+ base_ap_size: int,
2297
+ max_dim: int,
2298
+ ) -> tuple[float, float, float]:
2299
+ """
2300
+ Multi-scale AP shift:
2301
+ - compute shifts at 2×, 1×, ½× AP sizes using same centers
2302
+ - combine using confidence weights (favoring coarser slightly)
2303
+ Returns (dx, dy, conf) in ROI pixels.
2304
+ """
2305
+ s2, s1, s05 = _scaled_ap_sizes(base_ap_size)
2306
+
2307
+ dx2, dy2, cf2 = _ap_phase_shift(ref_m, cur_m, ap_centers, s2, max_dim)
2308
+ dx1, dy1, cf1 = _ap_phase_shift(ref_m, cur_m, ap_centers, s1, max_dim)
2309
+ dx0, dy0, cf0 = _ap_phase_shift(ref_m, cur_m, ap_centers, s05, max_dim)
2310
+
2311
+ # weights: confidence * slight preference for larger scale (stability)
2312
+ w2 = max(1e-3, float(cf2)) * 1.25
2313
+ w1 = max(1e-3, float(cf1)) * 1.00
2314
+ w0 = max(1e-3, float(cf0)) * 0.85
2315
+
2316
+ wsum = (w2 + w1 + w0)
2317
+ dx = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
2318
+ dy = (w2 * dy2 + w1 * dy1 + w0 * dy0) / wsum
2319
+ conf = float(np.clip((w2 * cf2 + w1 * cf1 + w0 * cf0) / wsum, 0.0, 1.0))
2320
+
2321
+ return float(dx), float(dy), float(conf)
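# Worked example of the confidence-weighted blend above: the 2x scale gets a 1.25x weight
# bias, 1x gets 1.00x and 0.5x gets 0.85x before the weighted average of the shifts.
cf2, cf1, cf0 = 0.8, 0.6, 0.4
dx2, dx1, dx0 = 1.2, 1.0, 0.6
w2 = max(1e-3, cf2) * 1.25      # 1.00
w1 = max(1e-3, cf1) * 1.00      # 0.60
w0 = max(1e-3, cf0) * 0.85      # 0.34
wsum = w2 + w1 + w0             # 1.94
dx = (w2 * dx2 + w1 * dx1 + w0 * dx0) / wsum
print(round(dx, 3))             # 1.033 -- the coarser, higher-confidence scale dominates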