setiastrosuitepro-1.6.2-py3-none-any.whl → setiastrosuitepro-1.6.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. setiastro/images/rotatearbitrary.png +0 -0
  2. setiastro/saspro/_generated/build_info.py +2 -2
  3. setiastro/saspro/backgroundneutral.py +10 -1
  4. setiastro/saspro/blink_comparator_pro.py +474 -251
  5. setiastro/saspro/crop_dialog_pro.py +11 -1
  6. setiastro/saspro/doc_manager.py +1 -1
  7. setiastro/saspro/function_bundle.py +16 -16
  8. setiastro/saspro/gui/main_window.py +93 -64
  9. setiastro/saspro/gui/mixins/dock_mixin.py +31 -18
  10. setiastro/saspro/gui/mixins/geometry_mixin.py +105 -5
  11. setiastro/saspro/gui/mixins/menu_mixin.py +1 -0
  12. setiastro/saspro/gui/mixins/toolbar_mixin.py +33 -10
  13. setiastro/saspro/multiscale_decomp.py +710 -256
  14. setiastro/saspro/remove_stars_preset.py +55 -13
  15. setiastro/saspro/resources.py +30 -11
  16. setiastro/saspro/selective_color.py +79 -20
  17. setiastro/saspro/shortcuts.py +94 -21
  18. setiastro/saspro/stacking_suite.py +296 -107
  19. setiastro/saspro/star_alignment.py +275 -330
  20. setiastro/saspro/status_log_dock.py +1 -1
  21. setiastro/saspro/swap_manager.py +77 -42
  22. setiastro/saspro/translations/all_source_strings.json +1588 -516
  23. setiastro/saspro/translations/ar_translations.py +915 -684
  24. setiastro/saspro/translations/de_translations.py +442 -463
  25. setiastro/saspro/translations/es_translations.py +277 -47
  26. setiastro/saspro/translations/fr_translations.py +279 -47
  27. setiastro/saspro/translations/hi_translations.py +253 -21
  28. setiastro/saspro/translations/integrate_translations.py +3 -2
  29. setiastro/saspro/translations/it_translations.py +1211 -161
  30. setiastro/saspro/translations/ja_translations.py +3340 -3107
  31. setiastro/saspro/translations/pt_translations.py +3315 -3337
  32. setiastro/saspro/translations/ru_translations.py +351 -117
  33. setiastro/saspro/translations/saspro_ar.qm +0 -0
  34. setiastro/saspro/translations/saspro_ar.ts +15902 -138
  35. setiastro/saspro/translations/saspro_de.qm +0 -0
  36. setiastro/saspro/translations/saspro_de.ts +14428 -133
  37. setiastro/saspro/translations/saspro_es.qm +0 -0
  38. setiastro/saspro/translations/saspro_es.ts +11503 -7821
  39. setiastro/saspro/translations/saspro_fr.qm +0 -0
  40. setiastro/saspro/translations/saspro_fr.ts +11168 -7812
  41. setiastro/saspro/translations/saspro_hi.qm +0 -0
  42. setiastro/saspro/translations/saspro_hi.ts +14733 -135
  43. setiastro/saspro/translations/saspro_it.qm +0 -0
  44. setiastro/saspro/translations/saspro_it.ts +14347 -7821
  45. setiastro/saspro/translations/saspro_ja.qm +0 -0
  46. setiastro/saspro/translations/saspro_ja.ts +14860 -137
  47. setiastro/saspro/translations/saspro_pt.qm +0 -0
  48. setiastro/saspro/translations/saspro_pt.ts +14904 -137
  49. setiastro/saspro/translations/saspro_ru.qm +0 -0
  50. setiastro/saspro/translations/saspro_ru.ts +11766 -168
  51. setiastro/saspro/translations/saspro_sw.qm +0 -0
  52. setiastro/saspro/translations/saspro_sw.ts +15115 -135
  53. setiastro/saspro/translations/saspro_uk.qm +0 -0
  54. setiastro/saspro/translations/saspro_uk.ts +11206 -6729
  55. setiastro/saspro/translations/saspro_zh.qm +0 -0
  56. setiastro/saspro/translations/saspro_zh.ts +10581 -7812
  57. setiastro/saspro/translations/sw_translations.py +282 -56
  58. setiastro/saspro/translations/uk_translations.py +264 -35
  59. setiastro/saspro/translations/zh_translations.py +282 -47
  60. setiastro/saspro/view_bundle.py +17 -17
  61. setiastro/saspro/widgets/minigame/game.js +11 -6
  62. setiastro/saspro/widgets/resource_monitor.py +26 -0
  63. setiastro/saspro/widgets/spinboxes.py +18 -0
  64. setiastro/saspro/wimi.py +65 -65
  65. setiastro/saspro/wims.py +33 -33
  66. setiastro/saspro/window_shelf.py +2 -2
  67. {setiastrosuitepro-1.6.2.dist-info → setiastrosuitepro-1.6.4.dist-info}/METADATA +7 -7
  68. {setiastrosuitepro-1.6.2.dist-info → setiastrosuitepro-1.6.4.dist-info}/RECORD +72 -71
  69. {setiastrosuitepro-1.6.2.dist-info → setiastrosuitepro-1.6.4.dist-info}/WHEEL +0 -0
  70. {setiastrosuitepro-1.6.2.dist-info → setiastrosuitepro-1.6.4.dist-info}/entry_points.txt +0 -0
  71. {setiastrosuitepro-1.6.2.dist-info → setiastrosuitepro-1.6.4.dist-info}/licenses/LICENSE +0 -0
  72. {setiastrosuitepro-1.6.2.dist-info → setiastrosuitepro-1.6.4.dist-info}/licenses/license.txt +0 -0
@@ -159,7 +159,7 @@ def _align_prefs(settings: QSettings | None = None) -> dict:
159
159
  prefs = {
160
160
  "model": model, # "affine" | "homography" | "poly3" | "poly4"
161
161
  "max_cp": _get("max_cp", 250, int),
162
- "downsample": _get("downsample", 2, int),
162
+ "downsample": _get("downsample", 3, int),
163
163
  "h_reproj": _get("h_reproj", 3.0, float),
164
164
 
165
165
  # Star detection / solve limits
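
Editor's note (not from the package): the `_get(...)` calls above read typed preferences with a default, which is why the bump from 2 to 3 only changes behavior for users who never set "downsample". A minimal sketch of such a typed getter, assuming a PyQt6-style QSettings; the binding, the "stacking/align" key prefix, and the org/app names below are guesses for illustration only.

    from PyQt6.QtCore import QSettings

    def get_pref(settings: QSettings, key: str, default, cast):
        # hypothetical helper mirroring the shape of _get("downsample", 3, int)
        val = settings.value(f"stacking/align/{key}", default)  # assumed key prefix
        try:
            return cast(val)
        except (TypeError, ValueError):
            return default

    s = QSettings("SetiAstro", "SetiAstroSuitePro")   # assumed org/app names
    downsample = get_pref(s, "downsample", 3, int)
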
@@ -1438,6 +1438,34 @@ class RegistrationWorkerSignals(QObject):
1438
1438
  # Identity transform (2x3)
1439
1439
  IDENTITY_2x3 = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float64)
1440
1440
 
1441
+ def _to3x3_affine(A2x3: np.ndarray) -> np.ndarray:
1442
+ A = np.asarray(A2x3, np.float64).reshape(2,3)
1443
+ return np.vstack([A, [0,0,1]])
1444
+
1445
+ def _from3x3_affine(A3: np.ndarray) -> np.ndarray:
1446
+ return np.asarray(A3, np.float64)[:2,:]
1447
+
1448
+ def _S(ds: float) -> np.ndarray:
1449
+ ds = float(ds)
1450
+ return np.array([[1.0/ds, 0, 0],
1451
+ [0, 1.0/ds, 0],
1452
+ [0, 0, 1]], np.float64)
1453
+
1454
+ def lift_affine_2x3_from_ds(A_ds_2x3: np.ndarray, ds: float) -> np.ndarray:
1455
+ S = _S(ds); Si = np.linalg.inv(S)
1456
+ A3_full = Si @ _to3x3_affine(A_ds_2x3) @ S
1457
+ return _from3x3_affine(A3_full)
1458
+
1459
+ def downscale_affine_2x3_to_ds(A_full_2x3: np.ndarray, ds: float) -> np.ndarray:
1460
+ S = _S(ds); Si = np.linalg.inv(S)
1461
+ A3_ds = S @ _to3x3_affine(A_full_2x3) @ Si
1462
+ return _from3x3_affine(A3_ds)
1463
+
1464
+ def lift_homography_from_ds(H_ds: np.ndarray, ds: float) -> np.ndarray:
1465
+ S = _S(ds); Si = np.linalg.inv(S)
1466
+ return Si @ np.asarray(H_ds, np.float64) @ S
1467
+
1468
+
1441
1469
  def compute_affine_transform_astroalign_cropped(source_img, reference_img,
1442
1470
  scale: float = 1.20,
1443
1471
  limit_stars: int | None = None,
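
A quick sanity check for the helpers added in the hunk above (a NumPy-only sketch, not taken from the package): downscaling a full-resolution affine into ds-space and lifting it back should return the original matrix, since the lift S⁻¹·(·)·S is the inverse of the downscale S·(·)·S⁻¹.

    import numpy as np

    def _S(ds: float) -> np.ndarray:
        # full-res -> downsampled-coordinate scaling, as in the helper above
        return np.array([[1.0/ds, 0, 0], [0, 1.0/ds, 0], [0, 0, 1]], np.float64)

    def _to3x3(A2x3) -> np.ndarray:
        return np.vstack([np.asarray(A2x3, np.float64).reshape(2, 3), [0, 0, 1]])

    A_full = np.array([[0.999, -0.012, 14.3],
                       [0.012,  0.999, -7.8]])          # arbitrary 2x3 affine
    S = _S(3.0); Si = np.linalg.inv(S)
    A_ds   = (S  @ _to3x3(A_full) @ Si)[:2, :]          # downscale_affine_2x3_to_ds
    A_back = (Si @ _to3x3(A_ds)   @ S)[:2, :]           # lift_affine_2x3_from_ds
    assert np.allclose(A_back, A_full)                  # round trip is a no-op
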
@@ -1879,31 +1907,34 @@ def project_affine_to_similarity(A2x3: np.ndarray) -> np.ndarray:
1879
1907
  def _solve_delta_job(args):
1880
1908
  """
1881
1909
  Worker: compute incremental affine/similarity delta for one frame against the ref preview.
1882
- args = (orig_path, current_transform_2x3, ref_small, Wref, Href,
1883
- resample_flag, det_sigma, limit_stars, minarea,
1884
- model, h_reproj)
1910
+ args =
1911
+ (orig_path, current_transform_2x3,
1912
+ ref_small_ds, Wref_ds, Href_ds,
1913
+ resample_flag, det_sigma, limit_stars, minarea,
1914
+ model, h_reproj, ds)
1885
1915
  """
1886
1916
  try:
1887
1917
  import os
1888
1918
  import numpy as np
1889
1919
  import cv2
1890
- import sep
1891
1920
  from astropy.io import fits
1892
1921
 
1893
- (orig_path, current_transform_2x3, ref_small, Wref, Href,
1922
+ (orig_path, current_transform_2x3,
1923
+ ref_small_ds, Wref_ds, Href_ds,
1894
1924
  resample_flag, det_sigma, limit_stars, minarea,
1895
- model, h_reproj) = args
1925
+ model, h_reproj, ds) = args
1926
+
1927
+ ds = max(1, int(ds))
1896
1928
 
1897
1929
  try:
1898
1930
  cv2.setNumThreads(1)
1899
1931
  try: cv2.ocl.setUseOpenCL(False)
1900
- except Exception as e:
1901
- import logging
1902
- logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
1932
+ except Exception:
1933
+ pass
1903
1934
  except Exception:
1904
1935
  pass
1905
1936
 
1906
- # 1) read → gray float32
1937
+ # 1) read → gray float32 (full)
1907
1938
  with fits.open(orig_path, memmap=True) as hdul:
1908
1939
  arr = hdul[0].data
1909
1940
  if arr is None:
@@ -1911,48 +1942,66 @@ def _solve_delta_job(args):
1911
1942
  gray = arr if arr.ndim == 2 else np.mean(arr, axis=2)
1912
1943
  gray = np.nan_to_num(gray, nan=0.0, posinf=0.0, neginf=0.0).astype(np.float32, copy=False)
1913
1944
 
1914
- # 2) pre-warp to REF size
1915
- T_prev = np.asarray(current_transform_2x3, np.float32).reshape(2, 3)
1916
- src_for_match = cv2.warpAffine(
1917
- gray, T_prev, (Wref, Href),
1945
+ # 2) downsample source to DS space
1946
+ if ds > 1:
1947
+ Wds = max(1, int(gray.shape[1] // ds))
1948
+ Hds = max(1, int(gray.shape[0] // ds))
1949
+ gray_ds = cv2.resize(gray, (Wds, Hds), interpolation=cv2.INTER_AREA)
1950
+ else:
1951
+ gray_ds = gray
1952
+
1953
+ # 3) pre-warp in DS space using downscaled transform
1954
+ T_prev_full = np.asarray(current_transform_2x3, np.float64).reshape(2, 3)
1955
+ T_prev_ds = downscale_affine_2x3_to_ds(T_prev_full, ds).astype(np.float32)
1956
+
1957
+ # Warp DS source into DS ref geometry
1958
+ src_for_match_ds = cv2.warpAffine(
1959
+ gray_ds, T_prev_ds, (int(Wref_ds), int(Href_ds)),
1918
1960
  flags=resample_flag, borderMode=cv2.BORDER_REFLECT_101
1919
1961
  )
1920
1962
 
1921
- # 3) denoise sparse islands to stabilize AA
1922
- src_for_match = _suppress_tiny_islands(src_for_match, det_sigma=det_sigma, minarea=minarea)
1923
- ref_small = _suppress_tiny_islands(ref_small, det_sigma=det_sigma, minarea=minarea)
1963
+ # 4) denoise sparse islands in DS space (cheaper)
1964
+ src_for_match_ds = _suppress_tiny_islands(src_for_match_ds, det_sigma=det_sigma, minarea=minarea)
1965
+ ref_for_match_ds = _suppress_tiny_islands(np.asarray(ref_small_ds, np.float32, order="C"),
1966
+ det_sigma=det_sigma, minarea=minarea)
1924
1967
 
1925
- # 4) AA incremental delta on cropped ref
1968
+ # 5) AA delta solve in DS space
1926
1969
  m = (model or "affine").lower()
1927
1970
  if m in ("no_distortion", "nodistortion"):
1928
1971
  m = "similarity"
1929
1972
 
1930
1973
  if m == "similarity":
1931
- tform = compute_similarity_transform_astroalign_cropped(
1932
- src_for_match, ref_small,
1974
+ tform_ds = compute_similarity_transform_astroalign_cropped(
1975
+ src_for_match_ds, ref_for_match_ds,
1933
1976
  limit_stars=int(limit_stars) if limit_stars is not None else None,
1934
1977
  det_sigma=float(det_sigma),
1935
1978
  minarea=int(minarea),
1936
1979
  h_reproj=float(h_reproj)
1937
1980
  )
1938
1981
  else:
1939
- tform = compute_affine_transform_astroalign_cropped(
1940
- src_for_match, ref_small,
1982
+ tform_ds = compute_affine_transform_astroalign_cropped(
1983
+ src_for_match_ds, ref_for_match_ds,
1941
1984
  limit_stars=int(limit_stars) if limit_stars is not None else None,
1942
1985
  det_sigma=float(det_sigma),
1943
1986
  minarea=int(minarea)
1944
1987
  )
1945
1988
 
1946
- if tform is None:
1989
+ if tform_ds is None:
1947
1990
  return (orig_path, None,
1948
1991
  f"Astroalign failed for {os.path.basename(orig_path)} – skipping (no transform returned)")
1949
1992
 
1950
- T_new = np.asarray(tform, np.float64).reshape(2, 3)
1951
- return (orig_path, T_new, None)
1993
+ # 6) lift DS delta back to full-res coords
1994
+ T_new_full = lift_affine_2x3_from_ds(np.asarray(tform_ds, np.float64).reshape(2, 3), ds)
1995
+
1996
+ return (orig_path, np.asarray(T_new_full, np.float64).reshape(2, 3), None)
1952
1997
 
1953
1998
  except Exception as e:
1999
+ try:
2000
+ base = os.path.basename(args[0]) if args else "<unknown>"
2001
+ except Exception:
2002
+ base = "<unknown>"
1954
2003
  return (args[0] if args else "<unknown>", None,
1955
- f"Astroalign failed for {os.path.basename(args[0]) if args else '<unknown>'}: {e}")
2004
+ f"Astroalign failed for {base}: {e}")
1956
2005
 
1957
2006
 
1958
2007
 
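
The two hunks above move `_solve_delta_job` into downsampled (DS) space: the frame is shrunk with INTER_AREA, pre-warped with the downscaled accumulated transform, matched against the DS reference, and the resulting delta is lifted back to full-resolution coordinates. A standalone sketch of that pre-warp step (NumPy + OpenCV; array sizes and the transform values are made up):

    import numpy as np, cv2

    ds = 3
    gray = np.random.rand(900, 1200).astype(np.float32)      # stand-in for the FITS frame
    Wds, Hds = gray.shape[1] // ds, gray.shape[0] // ds
    gray_ds = cv2.resize(gray, (Wds, Hds), interpolation=cv2.INTER_AREA)

    T_prev_full = np.array([[1, 0, 12.0], [0, 1, -6.0]], np.float64)  # hypothetical accumulated affine
    S = np.diag([1.0/ds, 1.0/ds, 1.0]); Si = np.linalg.inv(S)
    T_prev_ds = (S @ np.vstack([T_prev_full, [0, 0, 1]]) @ Si)[:2, :].astype(np.float32)

    src_for_match_ds = cv2.warpAffine(gray_ds, T_prev_ds, (Wds, Hds),
                                      flags=cv2.INTER_LINEAR,
                                      borderMode=cv2.BORDER_REFLECT_101)
    # The delta solved on src_for_match_ds vs. the DS reference is then lifted back with ds.
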
@@ -2036,7 +2085,7 @@ def _suppress_tiny_islands(img32: np.ndarray, det_sigma: float, minarea: int) ->
2036
2085
  # ─────────────────────────────────────────────────────────────
2037
2086
  def _finalize_write_job(args):
2038
2087
  """
2039
- Process-safe worker: read full-res, compute/choose model, warp, save.
2088
+ Process-safe worker: read full-res, choose model, warp, save.
2040
2089
  Returns (orig_path, out_path or "", msg, success, drizzle_tuple or None)
2041
2090
  drizzle_tuple = (kind, matrix_or_None)
2042
2091
  """
@@ -2057,17 +2106,19 @@ def _finalize_write_job(args):
2057
2106
  try:
2058
2107
  cv2.setNumThreads(1)
2059
2108
  try: cv2.ocl.setUseOpenCL(False)
2060
- except Exception as e:
2061
- import logging
2062
- logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
2109
+ except Exception:
2110
+ pass
2063
2111
  except Exception:
2064
2112
  pass
2065
2113
 
2066
2114
  debug_lines = []
2067
2115
  def dbg(s: str):
2068
- # keep it short-ish; UI emits each line
2069
2116
  debug_lines.append(str(s))
2070
2117
 
2118
+ def _A3(A2x3):
2119
+ A = np.asarray(A2x3, np.float64).reshape(2, 3)
2120
+ return np.vstack([A, [0, 0, 1]])
2121
+
2071
2122
  try:
2072
2123
  # 1) load source (full-res)
2073
2124
  with fits.open(orig_path, memmap=True) as hdul:
@@ -2076,12 +2127,12 @@ def _finalize_write_job(args):
2076
2127
  if img is None:
2077
2128
  return (orig_path, "", f"⚠️ Failed to read {os.path.basename(orig_path)}", False, None)
2078
2129
 
2079
- # Fix for white images: Normalize integer types to [0,1]
2130
+ # normalize ints
2080
2131
  if img.dtype == np.uint16:
2081
2132
  img = img.astype(np.float32) / 65535.0
2082
2133
  elif img.dtype == np.uint8:
2083
2134
  img = img.astype(np.float32) / 255.0
2084
-
2135
+
2085
2136
  is_mono = (img.ndim == 2)
2086
2137
  src_gray_full = img if is_mono else np.mean(img, axis=2)
2087
2138
  src_gray_full = np.nan_to_num(src_gray_full, nan=0.0, posinf=0.0, neginf=0.0).astype(np.float32, copy=False)
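
The normalization in the hunk above simply rescales integer camera data into [0, 1] floats before any warping. A minimal illustration (not package code):

    import numpy as np

    img = (np.random.rand(8, 8) * 65535).astype(np.uint16)   # synthetic 16-bit frame
    if img.dtype == np.uint16:
        img = img.astype(np.float32) / 65535.0
    elif img.dtype == np.uint8:
        img = img.astype(np.float32) / 255.0
    assert img.dtype == np.float32 and float(img.max()) <= 1.0
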
@@ -2090,40 +2141,61 @@ def _finalize_write_job(args):
2090
2141
 
2091
2142
  Href, Wref = ref_shape
2092
2143
 
2093
- # 2) load reference via memmap
2144
+ # 2) load reference (full-res) via memmap
2094
2145
  ref2d = np.load(ref_npy_path, mmap_mode="r").astype(np.float32, copy=False)
2095
2146
  if ref2d.shape[:2] != (Href, Wref):
2096
2147
  return (orig_path, "", f"⚠️ Ref shape mismatch for {os.path.basename(orig_path)}", False, None)
2097
2148
 
2098
2149
  base = os.path.basename(orig_path)
2099
2150
 
2100
- # helper: force affine to similarity (no shear)
2101
- def _affine_to_similarity(A2x3: np.ndarray) -> np.ndarray:
2102
- A2x3 = np.asarray(A2x3, np.float64).reshape(2, 3)
2103
- R = A2x3[:, :2]
2104
- t = A2x3[:, 2]
2105
- U, S, Vt = np.linalg.svd(R)
2106
- rot = U @ Vt
2107
- if np.linalg.det(rot) < 0:
2108
- U[:, -1] *= -1
2109
- rot = U @ Vt
2110
- s = float((S[0] + S[1]) * 0.5)
2111
- Rsim = rot * s
2112
- out = np.zeros((2, 3), dtype=np.float64)
2113
- out[:, :2] = Rsim
2114
- out[:, 2] = t
2115
- return out
2116
-
2117
- # 3) choose transform
2118
2151
  model = (align_model or "affine").lower()
2119
2152
  if model in ("no_distortion", "nodistortion"):
2120
2153
  model = "similarity"
2121
2154
 
2155
+ # Base (accumulated) affine from refinement
2156
+ A_prev = np.asarray(affine_2x3, np.float64).reshape(2, 3)
2157
+ A_prev3 = _A3(A_prev)
2158
+
2159
+ # Default finalize is just the affine refinement result
2122
2160
  kind = "affine"
2123
- X = np.asarray(affine_2x3, np.float64).reshape(2, 3)
2161
+ X = A_prev.copy()
2124
2162
 
2163
+ # ---- Non-affine finalize: DS solve + lift, but KEEP affine-as-start ----
2125
2164
  if model != "affine":
2126
- # ---- AA pairs (adaptive tiling) ----
2165
+ dbg(f"[finalize] base={base} model={model} det_sigma={det_sigma} minarea={minarea} limit_stars={limit_stars}")
2166
+
2167
+ ds = 2 # ✅ keep simple/safe; only DS+lift change requested
2168
+ ds = max(1, int(ds))
2169
+
2170
+ # DS reference
2171
+ if ds > 1:
2172
+ ref_ds = cv2.resize(ref2d, (max(1, Wref // ds), max(1, Href // ds)), interpolation=cv2.INTER_AREA)
2173
+ else:
2174
+ ref_ds = np.ascontiguousarray(ref2d)
2175
+
2176
+ ref_ds = np.ascontiguousarray(ref_ds.astype(np.float32, copy=False))
2177
+ Hds, Wds = ref_ds.shape[:2]
2178
+
2179
+ # DS source
2180
+ if ds > 1:
2181
+ src_ds0 = cv2.resize(src_gray_full, (Wds, Hds), interpolation=cv2.INTER_AREA)
2182
+ else:
2183
+ src_ds0 = cv2.resize(src_gray_full, (Wds, Hds), interpolation=cv2.INTER_AREA) if (src_gray_full.shape[:2] != (Hds, Wds)) else src_gray_full
2184
+
2185
+ src_ds0 = np.ascontiguousarray(src_ds0.astype(np.float32, copy=False))
2186
+
2187
+ # Pre-warp source in DS space using downscaled accumulated affine
2188
+ A_prev_ds = downscale_affine_2x3_to_ds(A_prev, ds).astype(np.float32)
2189
+ src_pre_ds = cv2.warpAffine(
2190
+ src_ds0, A_prev_ds, (Wds, Hds),
2191
+ flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101
2192
+ )
2193
+
2194
+ # Optional suppress tiny islands (your existing helper)
2195
+ src_pre_ds = _suppress_tiny_islands(src_pre_ds, det_sigma=float(det_sigma), minarea=int(minarea))
2196
+ ref_ds = _suppress_tiny_islands(ref_ds, det_sigma=float(det_sigma), minarea=int(minarea))
2197
+
2198
+ # AA correspondences in DS space: prewarped src vs ref
2127
2199
  max_cp = None
2128
2200
  try:
2129
2201
  if limit_stars is not None and int(limit_stars) > 0:
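
Both the refinement and finalize paths now call `_suppress_tiny_islands` in DS space before matching. Its implementation is not part of this diff; the sketch below is only a generic illustration of the idea (threshold at a sigma level, drop small connected components) and is an assumption, not the package's routine.

    import numpy as np, cv2

    def suppress_tiny_islands_sketch(img32: np.ndarray, det_sigma: float, minarea: int) -> np.ndarray:
        # Hypothetical stand-in: flatten bright blobs smaller than `minarea` pixels.
        med = float(np.median(img32))
        mad = float(np.median(np.abs(img32 - med))) + 1e-6
        mask = (img32 > med + det_sigma * 1.4826 * mad).astype(np.uint8)
        n, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
        out = img32.copy()
        for i in range(1, n):
            if stats[i, cv2.CC_STAT_AREA] < minarea:
                out[labels == i] = med
        return out
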
@@ -2131,13 +2203,10 @@ def _finalize_write_job(args):
2131
2203
  except Exception:
2132
2204
  max_cp = None
2133
2205
 
2134
- dbg(f"[finalize] base={base} model={model} det_sigma={det_sigma} minarea={minarea} limit_stars={limit_stars}")
2135
-
2136
- AA_SCALE = 0.80 # finalize-only
2206
+ AA_SCALE = 0.80
2137
2207
 
2138
- # ---- tiles=1 (center crop) ----
2139
2208
  src_xy, tgt_xy, best_P, best_xy0 = _aa_find_pairs_multitile(
2140
- src_gray_full, ref2d,
2209
+ src_pre_ds, ref_ds,
2141
2210
  scale=AA_SCALE,
2142
2211
  tiles=1,
2143
2212
  det_sigma=float(det_sigma),
@@ -2145,143 +2214,72 @@ def _finalize_write_job(args):
2145
2214
  max_control_points=max_cp,
2146
2215
  _dbg=dbg
2147
2216
  )
2148
-
2149
2217
  if src_xy is None or len(src_xy) < 8:
2150
- dbg("[AA] tiles=1 too few matches")
2151
- raise RuntimeError("astroalign produced too few matches")
2152
-
2153
- dbg(f"[AA] tiles=1 matches={len(src_xy)} best_tile_xy0={best_xy0}")
2154
-
2155
- spread_ok1 = _points_spread_ok(tgt_xy, Wref, Href, _dbg=dbg)
2156
- dbg(f"[AA] spread_ok(tiles=1)={spread_ok1}")
2157
-
2158
- # ---- fallback: tiles=5 (corners + center) ----
2159
- if not spread_ok1:
2160
- src_xy5, tgt_xy5, best_P5, best_xy0_5 = _aa_find_pairs_multitile(
2161
- src_gray_full, ref2d,
2162
- scale=AA_SCALE,
2163
- tiles=5, # <-- NEW primary fallback
2164
- det_sigma=float(det_sigma),
2165
- minarea=int(minarea),
2166
- max_control_points=max_cp,
2167
- _dbg=dbg
2168
- )
2169
-
2170
- if src_xy5 is None or len(src_xy5) < 8:
2171
- dbg("[AA] tiles=5 too few matches; keeping tiles=1")
2172
- else:
2173
- dbg(f"[AA] tiles=5 matches={len(src_xy5)} best_tile_xy0={best_xy0_5}")
2174
- spread_ok5 = _points_spread_ok(tgt_xy5, Wref, Href, _dbg=dbg)
2175
- dbg(f"[AA] spread_ok(tiles=5)={spread_ok5}")
2176
-
2177
- # choose tiles=5 if it spreads better OR gives more matches
2178
- if spread_ok5 or len(src_xy5) > len(src_xy):
2179
- dbg("[AA] switching to tiles=5 result")
2180
- src_xy, tgt_xy = src_xy5, tgt_xy5
2181
- best_P, best_xy0 = best_P5, best_xy0_5
2182
- else:
2183
- dbg("[AA] keeping tiles=1 result (tiles=5 not better)")
2184
-
2185
- # ---- tertiary fallback: tiles=3 grid ----
2186
- spread_ok_after = _points_spread_ok(tgt_xy, Wref, Href, _dbg=dbg)
2187
- dbg(f"[AA] spread_ok(after tiles=5 check)={spread_ok_after}")
2188
-
2189
- if not spread_ok_after:
2190
- src_xy3, tgt_xy3, best_P3, best_xy0_3 = _aa_find_pairs_multitile(
2191
- src_gray_full, ref2d,
2192
- scale=AA_SCALE,
2193
- tiles=3,
2194
- det_sigma=float(det_sigma),
2195
- minarea=int(minarea),
2196
- max_control_points=max_cp,
2197
- _dbg=dbg
2198
- )
2199
-
2200
- if src_xy3 is None or len(src_xy3) < 8:
2201
- dbg("[AA] tiles=3 too few matches; keeping current result")
2202
- else:
2203
- dbg(f"[AA] tiles=3 matches={len(src_xy3)} best_tile_xy0={best_xy0_3}")
2204
- spread_ok3 = _points_spread_ok(tgt_xy3, Wref, Href, _dbg=dbg)
2205
- dbg(f"[AA] spread_ok(tiles=3)={spread_ok3}")
2206
-
2207
- if spread_ok3 or len(src_xy3) > len(src_xy):
2208
- dbg("[AA] switching to tiles=3 result")
2209
- src_xy, tgt_xy = src_xy3, tgt_xy3
2210
- best_P, best_xy0 = best_P3, best_xy0_3
2211
- else:
2212
- dbg("[AA] keeping current result (tiles=3 not better)")
2213
-
2214
- x0, y0 = best_xy0
2215
- P = np.asarray(best_P, np.float64)
2216
-
2217
- # ---- base full-ref from best_P + best_xy0 ----
2218
- if P.shape == (3, 3):
2219
- base_kind0 = "homography"
2220
- T = np.array([[1,0,x0],[0,1,y0],[0,0,1]], dtype=np.float64)
2221
- base_X0 = T @ P
2222
- else:
2223
- base_kind0 = "affine"
2224
- A3 = np.vstack([P[0:2, :], [0,0,1]])
2225
- T = np.array([[1,0,x0],[0,1,y0],[0,0,1]], dtype=np.float64)
2226
- base_X0 = (T @ A3)[0:2, :]
2218
+ raise RuntimeError("astroalign produced too few matches (finalize)")
2227
2219
 
2220
+ # RANSAC threshold in DS pixels
2228
2221
  hth = float(h_reproj)
2229
2222
 
2230
2223
  if model == "homography":
2231
- H, inl = cv2.findHomography(src_xy, tgt_xy, cv2.RANSAC, ransacReprojThreshold=hth)
2224
+ # Delta homography maps prewarped -> ref (both in DS coords)
2225
+ H_delta_ds, inl = cv2.findHomography(src_xy, tgt_xy, cv2.RANSAC, ransacReprojThreshold=hth)
2232
2226
  ninl = int(inl.sum()) if inl is not None else 0
2233
- dbg(f"[RANSAC] homography inliers={ninl}/{len(src_xy)} thr={hth}")
2227
+ dbg(f"[RANSAC] homography delta(DS) inliers={ninl}/{len(src_xy)} thr={hth}")
2234
2228
 
2235
- if H is not None:
2236
- kind, X = "homography", np.asarray(H, np.float64)
2229
+ if H_delta_ds is None:
2230
+ # fallback to just affine refinement
2231
+ kind, X = "affine", A_prev.copy()
2237
2232
  else:
2238
- kind, X = base_kind0, base_X0
2233
+ H_delta_full = lift_homography_from_ds(H_delta_ds, ds)
2234
+ H_final = np.asarray(H_delta_full, np.float64) @ A_prev3
2235
+ kind, X = "homography", H_final
2239
2236
 
2240
2237
  elif model == "similarity":
2241
- A, inl = cv2.estimateAffinePartial2D(src_xy, tgt_xy, cv2.RANSAC, ransacReprojThreshold=hth)
2238
+ # Delta similarity (affine partial) maps prewarped -> ref in DS coords
2239
+ A_delta_ds, inl = cv2.estimateAffinePartial2D(
2240
+ src_xy, tgt_xy, cv2.RANSAC, ransacReprojThreshold=hth
2241
+ )
2242
2242
  ninl = int(inl.sum()) if inl is not None else 0
2243
- dbg(f"[RANSAC] similarity inliers={ninl}/{len(src_xy)} thr={hth}")
2243
+ dbg(f"[RANSAC] similarity delta(DS) inliers={ninl}/{len(src_xy)} thr={hth}")
2244
2244
 
2245
- if A is not None:
2246
- kind, X = "similarity", np.asarray(A, np.float64)
2245
+ if A_delta_ds is None:
2246
+ kind, X = "similarity", _project_to_similarity(A_prev)
2247
2247
  else:
2248
- if base_kind0 == "affine":
2249
- kind, X = "similarity", _affine_to_similarity(base_X0)
2250
- else:
2251
- kind, X = base_kind0, base_X0
2252
-
2253
- elif model == "affine":
2254
- kind, X = "affine", np.asarray(affine_2x3, np.float64)
2248
+ A_delta_full = lift_affine_2x3_from_ds(A_delta_ds, ds)
2249
+ # Compose delta prev in affine space
2250
+ A_final3 = _A3(A_delta_full) @ A_prev3
2251
+ A_final = A_final3[:2, :]
2252
+ kind, X = "similarity", _project_to_similarity(A_final)
2255
2253
 
2256
2254
  elif model in ("poly3", "poly4"):
2255
+ # Keep behavior simple: poly fit in FULL coords using pairs from prewarped DS,
2256
+ # then apply as remap on the ORIGINAL image (same as your current poly path).
2257
+ # (If you later want true "poly residual after affine", we can do that safely,
2258
+ # but that is a pattern change beyond DS+lift.)
2257
2259
  order = 3 if model == "poly3" else 4
2258
- cx, cy = _fit_poly_xy(src_xy, tgt_xy, order=order)
2260
+ src_full = (np.asarray(src_xy, np.float32) * float(ds)).astype(np.float32)
2261
+ tgt_full = (np.asarray(tgt_xy, np.float32) * float(ds)).astype(np.float32)
2262
+
2263
+ cx, cy = _fit_poly_xy(src_full, tgt_full, order=order)
2259
2264
  map_x, map_y = _poly_eval_grid(cx, cy, Wref, Href, order=order)
2260
2265
  kind, X = model, (map_x, map_y)
2261
2266
 
2262
2267
  else:
2263
- dbg(f"[AA] unknown model '{model}', falling back to base {base_kind0}")
2264
- kind, X = base_kind0, base_X0
2268
+ # Unknown model -> just write affine refinement
2269
+ kind, X = "affine", A_prev.copy()
2265
2270
 
2266
- # 4) warp
2271
+ # 4) warp full-res
2267
2272
  Hh, Ww = Href, Wref
2268
2273
 
2269
2274
  if kind in ("affine", "similarity"):
2270
2275
  A = np.asarray(X, np.float64).reshape(2, 3)
2271
-
2272
2276
  if is_mono:
2273
- aligned = cv2.warpAffine(
2274
- img, A, (Ww, Hh),
2275
- flags=cv2.INTER_LANCZOS4,
2276
- borderMode=cv2.BORDER_CONSTANT, borderValue=0
2277
- )
2277
+ aligned = cv2.warpAffine(img, A, (Ww, Hh), flags=cv2.INTER_LANCZOS4,
2278
+ borderMode=cv2.BORDER_CONSTANT, borderValue=0)
2278
2279
  else:
2279
2280
  aligned = np.stack([
2280
- cv2.warpAffine(
2281
- img[..., c], A, (Ww, Hh),
2282
- flags=cv2.INTER_LANCZOS4,
2283
- borderMode=cv2.BORDER_CONSTANT, borderValue=0
2284
- )
2281
+ cv2.warpAffine(img[..., c], A, (Ww, Hh), flags=cv2.INTER_LANCZOS4,
2282
+ borderMode=cv2.BORDER_CONSTANT, borderValue=0)
2285
2283
  for c in range(img.shape[2])
2286
2284
  ], axis=2)
2287
2285
 
@@ -2290,34 +2288,27 @@ def _finalize_write_job(args):
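
The key change in the hunk above is that the RANSAC model is now a delta solved in DS space on the pre-warped frame, so it is lifted to full resolution and composed with the accumulated affine, i.e. the delta is applied after the prior transform. A small NumPy-only sketch of that composition order (the matrices are placeholders):

    import numpy as np

    def to3(A2x3):                          # 2x3 affine -> 3x3 homogeneous
        return np.vstack([np.asarray(A2x3, np.float64).reshape(2, 3), [0, 0, 1]])

    A_prev = np.array([[1.0, 0.0, 10.0],
                       [0.0, 1.0, -4.0]])               # accumulated refinement affine
    H_delta_full = np.array([[1.0, 0.0, 0.5],
                             [0.0, 1.0, -0.2],
                             [1e-6, 0.0, 1.0]])         # lifted DS delta (hypothetical)

    H_final = H_delta_full @ to3(A_prev)                # delta AFTER the prior affine
    p = np.array([100.0, 200.0, 1.0])
    assert np.allclose(H_final @ p, H_delta_full @ (to3(A_prev) @ p))
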
2290
2288
 
2291
2289
  elif kind == "homography":
2292
2290
  Hm = np.asarray(X, np.float64).reshape(3, 3)
2293
-
2294
2291
  if is_mono:
2295
- aligned = cv2.warpPerspective(
2296
- img, Hm, (Ww, Hh),
2297
- flags=cv2.INTER_LANCZOS4,
2298
- borderMode=cv2.BORDER_CONSTANT, borderValue=0
2299
- )
2292
+ aligned = cv2.warpPerspective(img, Hm, (Ww, Hh), flags=cv2.INTER_LANCZOS4,
2293
+ borderMode=cv2.BORDER_CONSTANT, borderValue=0)
2300
2294
  else:
2301
2295
  aligned = np.stack([
2302
- cv2.warpPerspective(
2303
- img[..., c], Hm, (Ww, Hh),
2304
- flags=cv2.INTER_LANCZOS4,
2305
- borderMode=cv2.BORDER_CONSTANT, borderValue=0
2306
- )
2296
+ cv2.warpPerspective(img[..., c], Hm, (Ww, Hh), flags=cv2.INTER_LANCZOS4,
2297
+ borderMode=cv2.BORDER_CONSTANT, borderValue=0)
2307
2298
  for c in range(img.shape[2])
2308
2299
  ], axis=2)
2309
2300
 
2310
2301
  drizzle_tuple = ("homography", Hm.astype(np.float64))
2311
2302
  warp_label = "homography"
2312
2303
 
2313
- elif kind in ("poly3","poly4"):
2304
+ elif kind in ("poly3", "poly4"):
2314
2305
  map_x, map_y = X
2315
2306
  if is_mono:
2316
2307
  aligned = cv2.remap(img, map_x, map_y, cv2.INTER_LANCZOS4,
2317
2308
  borderMode=cv2.BORDER_CONSTANT, borderValue=0)
2318
2309
  else:
2319
2310
  aligned = np.stack([
2320
- cv2.remap(img[...,c], map_x, map_y, cv2.INTER_LANCZOS4,
2311
+ cv2.remap(img[..., c], map_x, map_y, cv2.INTER_LANCZOS4,
2321
2312
  borderMode=cv2.BORDER_CONSTANT, borderValue=0)
2322
2313
  for c in range(img.shape[2])
2323
2314
  ], axis=2)
@@ -2378,14 +2369,10 @@ class StarRegistrationWorker(QRunnable):
2378
2369
 
2379
2370
  def run(self):
2380
2371
  """
2381
- Affine:
2382
- - Apply current transform to a preview-sized image
2383
- - Solve incremental delta vs reference preview
2384
- - Emit the incremental delta (2x3) keyed by ORIGINAL path
2385
-
2386
- Non-affine (homography/poly3/4):
2387
- - This QRunnable does not try to do residuals; it just reports and emits identity.
2388
- The multi-process residual pass is handled by StarRegistrationThread.
2372
+ Refinement worker ALWAYS computes incremental deltas in affine/similarity space,
2373
+ even if the FINAL requested model is homography/poly3/poly4.
2374
+
2375
+ The final non-affine model (if any) is applied in _finalize_write_job only.
2389
2376
  """
2390
2377
  try:
2391
2378
  _cap_native_threads_once()
@@ -2411,21 +2398,19 @@ class StarRegistrationWorker(QRunnable):
2411
2398
  return
2412
2399
  Href, Wref = ref_small.shape[:2]
2413
2400
 
2414
- model = (self.model_name or "affine").lower()
2415
-
2416
- # --- Non-affine: don't accumulate here; identity + progress line only
2417
- if model in ("homography", "poly3", "poly4"):
2418
- self.signals.progress.emit(
2419
- f"Residual-only mode for {os.path.basename(self.original_file)} (model={model}); "
2420
- "emitting identity transform (handled by thread pass)."
2421
- )
2422
- self.signals.result_transform.emit(os.path.normpath(self.original_file), IDENTITY_2x3.copy())
2423
- self.signals.result.emit(self.original_file)
2424
- return
2401
+ # ✅ Refinement solve model: always affine or similarity
2402
+ model_req = (self.model_name or "affine").lower()
2403
+ if model_req in ("no_distortion", "nodistortion", "similarity"):
2404
+ refine_model = "similarity"
2405
+ else:
2406
+ refine_model = "affine" # includes when final requested is homography/poly*
2425
2407
 
2426
- # --- Affine incremental
2427
2408
  T_prev = np.array(self.current_transform, dtype=np.float32).reshape(2, 3)
2428
- use_warp = not np.allclose(T_prev, np.array([[1,0,0],[0,1,0]], dtype=np.float32), rtol=1e-5, atol=1e-5)
2409
+ use_warp = not np.allclose(
2410
+ T_prev,
2411
+ np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32),
2412
+ rtol=1e-5, atol=1e-5
2413
+ )
2429
2414
 
2430
2415
  if use_warp and cv2 is not None:
2431
2416
  src_for_match = cv2.warpAffine(
@@ -2439,9 +2424,21 @@ class StarRegistrationWorker(QRunnable):
2439
2424
  src_for_match = gray_small
2440
2425
 
2441
2426
  try:
2442
- transform = self.compute_affine_transform_astroalign(
2443
- src_for_match, ref_small, limit_stars=getattr(self, "limit_stars", None)
2444
- )
2427
+ if refine_model == "similarity":
2428
+ transform = compute_similarity_transform_astroalign_cropped(
2429
+ src_for_match, ref_small,
2430
+ limit_stars=getattr(self, "limit_stars", None),
2431
+ det_sigma=getattr(self, "det_sigma", 12.0),
2432
+ minarea=getattr(self, "minarea", 10),
2433
+ h_reproj=getattr(self, "h_reproj", 3.0),
2434
+ )
2435
+ else:
2436
+ transform = self.compute_affine_transform_astroalign(
2437
+ src_for_match, ref_small,
2438
+ limit_stars=getattr(self, "limit_stars", None),
2439
+ det_sigma=getattr(self, "det_sigma", 12.0),
2440
+ minarea=getattr(self, "minarea", 10),
2441
+ )
2445
2442
  except Exception as e:
2446
2443
  msg = str(e)
2447
2444
  base = os.path.basename(self.original_file)
@@ -2457,19 +2454,22 @@ class StarRegistrationWorker(QRunnable):
2457
2454
  return
2458
2455
 
2459
2456
  transform = np.array(transform, dtype=np.float64).reshape(2, 3)
2457
+
2458
+ # Similarity projection safety (no shear)
2459
+ if refine_model == "similarity":
2460
+ transform = _project_to_similarity(transform)
2461
+
2460
2462
  key = os.path.normpath(self.original_file)
2461
2463
  self.signals.result_transform.emit(key, transform)
2462
2464
  self.signals.progress.emit(
2463
2465
  f"Astroalign delta for {os.path.basename(self.original_file)} "
2464
- f"(model={self.model_name}): dx={transform[0, 2]:.2f}, dy={transform[1, 2]:.2f}"
2466
+ f"(refine={refine_model}, final={self.model_name}): dx={transform[0,2]:.2f}, dy={transform[1,2]:.2f}"
2465
2467
  )
2466
2468
  self.signals.result.emit(self.original_file)
2467
2469
 
2468
2470
  except Exception as e:
2469
2471
  self.signals.error.emit(f"Error processing {self.original_file}: {e}")
2470
2472
 
2471
-
2472
-
2473
2473
  @staticmethod
2474
2474
  def compute_affine_transform_astroalign(source_img, reference_img,
2475
2475
  scale=1.20,
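
In the hunk above the worker projects its solved delta onto a similarity (`_project_to_similarity`) whenever the requested model is shear-free. The removed `_affine_to_similarity` helper earlier in this diff shows the underlying math: take the SVD of the 2x2 linear part, keep the rotation, and replace the singular values by their mean. A sketch of that projection, assuming `_project_to_similarity` does essentially the same thing:

    import numpy as np

    def project_to_similarity_sketch(A2x3: np.ndarray) -> np.ndarray:
        A = np.asarray(A2x3, np.float64).reshape(2, 3)
        R, t = A[:, :2], A[:, 2]
        U, S, Vt = np.linalg.svd(R)
        rot = U @ Vt
        if np.linalg.det(rot) < 0:             # keep a proper rotation (no reflection)
            U[:, -1] *= -1
            rot = U @ Vt
        s = float(S.mean())                    # isotropic scale = mean singular value
        out = np.zeros((2, 3))
        out[:, :2] = s * rot
        out[:, 2] = t
        return out
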
@@ -2619,7 +2619,7 @@ class StarRegistrationThread(QThread):
2619
2619
  self.det_sigma = float(self.align_prefs.get("det_sigma", 12.0))
2620
2620
  self.limit_stars = int(self.align_prefs.get("limit_stars", 500))
2621
2621
  self.minarea = int(self.align_prefs.get("minarea", 10))
2622
- self.downsample = int(self.align_prefs.get("downsample", 2))
2622
+ self.downsample = int(self.align_prefs.get("downsample", 3))
2623
2623
  self.drizzle_xforms = {} # {orig_norm_path: (kind, matrix)}
2624
2624
 
2625
2625
  @staticmethod
@@ -2969,23 +2969,37 @@ class StarRegistrationThread(QThread):
2969
2969
 
2970
2970
  # ✂️ No DAO/RANSAC: astroalign handles detection internally.
2971
2971
 
2972
- # Single shared downsampled ref for workers
2973
- #ds = max(1, int(self.align_prefs.get("downsample", 2)))
2974
- #if ds > 1:
2975
- # new_hw = (max(1, ref2d.shape[1] // ds), max(1, ref2d.shape[0] // ds)) # (W, H)
2976
- # ref_small = cv2.resize(ref2d, new_hw, interpolation=cv2.INTER_AREA)
2977
- #else:
2978
- # ref_small = ref2d
2979
- #self.ref_small = np.ascontiguousarray(ref_small.astype(np.float32))
2980
- self.ref_small = np.ascontiguousarray(ref2d.astype(np.float32))
2972
+ # --- Build shared ref at full + downsampled solve-res ---
2973
+ self.ref_small_full = np.ascontiguousarray(ref2d.astype(np.float32, copy=False))
2974
+
2975
+ # Use existing preference key you already have: self.downsample
2976
+ # (you load it in __init__: self.downsample = int(self.align_prefs.get("downsample", 2)))
2977
+ ds = max(1, int(self.downsample))
2978
+ self.solve_downsample = ds
2979
+
2980
+ if ds > 1 and cv2 is not None:
2981
+ new_hw = (max(1, ref2d.shape[1] // ds), max(1, ref2d.shape[0] // ds)) # (W, H)
2982
+ ref_ds = cv2.resize(self.ref_small_full, new_hw, interpolation=cv2.INTER_AREA)
2983
+ else:
2984
+ ref_ds = self.ref_small_full
2985
+
2986
+ self.ref_small = self.ref_small_full # keep existing attribute name (full)
2987
+ self.ref_small_ds = np.ascontiguousarray(ref_ds.astype(np.float32, copy=False))
2981
2988
 
2982
2989
  # Initialize transforms to identity for EVERY original frame
2983
2990
  self.alignment_matrices = {os.path.normpath(f): IDENTITY_2x3.copy() for f in self.original_files}
2984
2991
  self.delta_transforms = {}
2985
2992
 
2986
2993
  # Progress totals (units = number of worker completions across passes)
2994
+ # Progress totals:
2995
+ # passes = N * passes
2996
+ # finalize = N
2997
+ N = len(self.original_files)
2998
+ P = max(1, int(self.max_refinement_passes))
2999
+
2987
3000
  self._done = 0
2988
- self._total = len(self.original_files) * max(1, int(self.max_refinement_passes))
3001
+ self._total = (N * P) + N # <-- IMPORTANT: include finalize
3002
+ self.progress_step.emit(self._done, self._total) # optional but helps UI reset immediately
2989
3003
 
2990
3004
  # Registration passes (compute deltas only)
2991
3005
  for pass_idx in range(self.max_refinement_passes):
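
The progress bookkeeping above now budgets one unit per frame per refinement pass plus one unit per frame for the finalize write, i.e. total = N·P + N, so the bar only reaches 100% after finalize completes. A one-line worked example (numbers are illustrative only):

    N, P = 120, 3                  # hypothetical frame count and refinement passes
    total = N * P + N              # refinement units + finalize units
    assert total == 480
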
@@ -3027,109 +3041,30 @@ class StarRegistrationThread(QThread):
3027
3041
  def run_one_registration_pass(self, _ref_stars_unused, _ref_triangles_unused, pass_index):
3028
3042
  _cap_native_threads_once()
3029
3043
  import os
3030
- import shutil
3031
- import tempfile
3032
3044
  import cv2
3045
+ import time
3033
3046
 
3034
- model = (self.align_model or "affine").lower()
3035
- ref_small = np.ascontiguousarray(self.ref_small.astype(np.float32, copy=False))
3036
- Href, Wref = ref_small.shape[:2]
3047
+ # Requested final model (used ONLY in finalize)
3048
+ final_model = (self.align_model or "affine").lower()
3049
+
3050
+ # ✅ Refinement model: affine or similarity only
3051
+ if final_model in ("no_distortion", "nodistortion", "similarity"):
3052
+ refine_model = "similarity"
3053
+ else:
3054
+ refine_model = "affine"
3055
+
3056
+ ref_small_ds = np.ascontiguousarray(self.ref_small_ds.astype(np.float32, copy=False))
3057
+ Href_ds, Wref_ds = ref_small_ds.shape[:2]
3058
+ ds = max(1, int(getattr(self, "solve_downsample", 1)))
3037
3059
 
3038
- # --- Build reverse map: current_path -> original_key (handles bin2-upscale / rewrites)
3060
+ # --- reverse map: current_path -> original_key
3039
3061
  rev_current_to_orig = {}
3040
3062
  for orig_k, curr_p in self.file_key_to_current_path.items():
3041
3063
  rev_current_to_orig[os.path.normpath(curr_p)] = os.path.normpath(orig_k)
3042
3064
 
3043
- # ---------- NON-AFFINE PATH: residuals-only ----------
3044
- if model in ("homography", "poly3", "poly4"):
3045
- work_list = list(self.original_files)
3046
-
3047
- from concurrent.futures import ProcessPoolExecutor, as_completed
3048
- procs = max(2, min((os.cpu_count() or 8), 32))
3049
- self.progress_update.emit(f"Using {procs} processes to measure residuals (model={model}).")
3050
-
3051
- tmpdir = tempfile.mkdtemp(prefix="sas_resid_")
3052
- ref_npy = os.path.join(tmpdir, "ref_small.npy")
3053
- try:
3054
- np.save(ref_npy, ref_small)
3055
- except Exception as e:
3056
- try: shutil.rmtree(tmpdir, ignore_errors=True)
3057
- except Exception as e:
3058
- import logging
3059
- logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
3060
- self.on_worker_error(f"Failed to persist residual reference: {e}")
3061
- return False, "Residual pass aborted."
3062
-
3063
- pass_deltas = []
3064
- try:
3065
-
3066
- import time
3067
-
3068
- jobs = [
3069
- (p, ref_npy, model, self.h_reproj, self.det_sigma, self.minarea, self.limit_stars)
3070
- for p in work_list
3071
- ]
3072
- total = len(jobs)
3073
- done = 0
3074
-
3075
- self.progress_update.emit(f"Using {procs} processes to measure residuals (model={model}).")
3076
- self.progress_step.emit(0, total)
3077
-
3078
- with _make_executor(procs) as ex:
3079
- pending = {ex.submit(_residual_job_worker, j): j[0] for j in jobs}
3080
- last_heartbeat = time.monotonic()
3081
-
3082
- while pending:
3083
- done_set, pending = wait(pending, timeout=0.6, return_when=FIRST_COMPLETED)
3084
- # heartbeat if nothing finished for a bit
3085
- now = time.monotonic()
3086
- if not done_set and (now - last_heartbeat) > 2.0:
3087
- self.progress_update.emit(f"… measuring residuals ({done}/{total} done)")
3088
- last_heartbeat = now
3089
-
3090
- for fut in done_set:
3091
- orig_pth = os.path.normpath(pending.pop(fut, "<unknown>")) if fut in pending else "<unknown>"
3092
- try:
3093
- pth, rms, err = fut.result()
3094
- except Exception as e:
3095
- pth, rms, err = (orig_pth, float("inf"), f"Worker crashed: {e}")
3096
-
3097
- k_orig = os.path.normpath(pth or orig_pth)
3098
- if err:
3099
- self.on_worker_error(f"Residual measure failed for {os.path.basename(k_orig)}: {err}")
3100
- self.delta_transforms[k_orig] = float("inf")
3101
- else:
3102
- self.delta_transforms[k_orig] = float(rms)
3103
- self.progress_update.emit(
3104
- f"[residuals] {os.path.basename(k_orig)} → RMS={rms:.2f}px"
3105
- )
3106
-
3107
- done += 1
3108
- self.progress_step.emit(done, total)
3109
- last_heartbeat = now
3110
-
3111
- for orig in self.original_files:
3112
- pass_deltas.append(self.delta_transforms.get(os.path.normpath(orig), float("inf")))
3113
- self.transform_deltas.append(pass_deltas)
3114
-
3115
- preview = ", ".join([f"{d:.2f}" if np.isfinite(d) else "∞" for d in pass_deltas[:10]])
3116
- if len(pass_deltas) > 10:
3117
- preview += f" … ({len(pass_deltas)} total)"
3118
- self.progress_update.emit(f"Pass {pass_index + 1}: residual RMS px [{preview}]")
3119
-
3120
- aligned_count = sum(1 for d in pass_deltas if np.isfinite(d) and d <= self.shift_tolerance)
3121
- if aligned_count:
3122
- self.progress_update.emit(f"Within tolerance (≤ {self.shift_tolerance:.2f}px): {aligned_count} frame(s)")
3123
- return True, "Residual pass complete."
3124
- finally:
3125
- try: shutil.rmtree(tmpdir, ignore_errors=True)
3126
- except Exception as e:
3127
- import logging
3128
- logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
3129
-
3130
- # ---------- AFFINE PATH (incremental delta accumulation) ----------
3131
3065
  resample_flag = cv2.INTER_AREA if pass_index == 0 else cv2.INTER_LINEAR
3132
3066
 
3067
+ # Work list: pass 0 all; later passes skip within tolerance
3133
3068
  if pass_index == 0:
3134
3069
  work_list = list(self.original_files)
3135
3070
  else:
@@ -3156,30 +3091,36 @@ class StarRegistrationThread(QThread):
3156
3091
  return True, "Pass complete (nothing to refine)."
3157
3092
 
3158
3093
  procs = max(2, min((os.cpu_count() or 8), 32))
3159
- self.progress_update.emit(f"Using {procs} processes for stellar alignment (HW={os.cpu_count() or 8}).")
3094
+ self.progress_update.emit(f"Using {procs} processes for stellar alignment (refine={refine_model}).")
3160
3095
 
3161
3096
  timeout_sec = int(self.align_prefs.get("timeout_per_job_sec", 300))
3097
+
3162
3098
  jobs = []
3163
3099
  for orig_key in work_list:
3164
3100
  ok = os.path.normpath(orig_key)
3165
- current_path = os.path.normpath(self.file_key_to_current_path.get(ok, ok))
3101
+
3102
+ # IMPORTANT: refinement reads ORIGINAL frame (no intermediate saves)
3103
+ current_path = ok
3104
+
3166
3105
  current_transform = self.alignment_matrices.get(ok, IDENTITY_2x3)
3106
+
3167
3107
  jobs.append((
3168
3108
  current_path,
3169
3109
  current_transform,
3170
- ref_small, Wref, Href,
3171
- resample_flag, float(self.det_sigma), int(self.limit_stars), int(self.minarea),
3172
- model, float(self.h_reproj)
3110
+ ref_small_ds, int(Wref_ds), int(Href_ds),
3111
+ resample_flag, float(self.det_sigma),
3112
+ int(self.limit_stars) if self.limit_stars is not None else None,
3113
+ int(self.minarea),
3114
+ refine_model, float(self.h_reproj),
3115
+ int(ds)
3173
3116
  ))
3174
3117
 
3175
- import time
3176
3118
  executor = _make_executor(procs)
3177
-
3178
3119
  try:
3179
3120
  fut_info, pending = {}, set()
3180
3121
  for j in jobs:
3181
3122
  f = executor.submit(_solve_delta_job, j)
3182
- fut_info[f] = (time.monotonic(), j[0]) # j[0] = current_path
3123
+ fut_info[f] = (time.monotonic(), j[0])
3183
3124
  pending.add(f)
3184
3125
 
3185
3126
  while pending:
@@ -3191,7 +3132,7 @@ class StarRegistrationThread(QThread):
3191
3132
  except Exception as e:
3192
3133
  curr_path_r, T_new, err = (returned_path or "<unknown>", None, f"Worker crashed: {e}")
3193
3134
 
3194
- # Map CURRENT path back to ORIGINAL key for consistent accumulation
3135
+ # Map back to ORIGINAL key
3195
3136
  curr_norm = os.path.normpath(curr_path_r)
3196
3137
  k_orig = rev_current_to_orig.get(curr_norm, curr_norm)
3197
3138
 
@@ -3201,10 +3142,13 @@ class StarRegistrationThread(QThread):
3201
3142
  continue
3202
3143
 
3203
3144
  T_new = np.array(T_new, dtype=np.float64).reshape(2, 3)
3204
- if model in ("no_distortion", "nodistortion", "similarity"):
3205
- T_new = _project_to_similarity(T_new)
3145
+
3146
+ if refine_model == "similarity":
3147
+ T_new = _project_to_similarity(T_new)
3148
+
3206
3149
  self.delta_transforms[k_orig] = float(np.hypot(T_new[0, 2], T_new[1, 2]))
3207
3150
 
3151
+ # Accumulate: T_total = T_new ∘ T_prev
3208
3152
  T_prev = np.array(self.alignment_matrices.get(k_orig, IDENTITY_2x3), dtype=np.float64).reshape(2, 3)
3209
3153
  prev_3 = np.vstack([T_prev, [0, 0, 1]])
3210
3154
  new_3 = np.vstack([T_new, [0, 0, 1]])
@@ -3212,7 +3156,7 @@ class StarRegistrationThread(QThread):
3212
3156
 
3213
3157
  self.on_worker_progress(
3214
3158
  f"Astroalign delta for {os.path.basename(curr_path_r)} "
3215
- f"(model={self.align_model}): dx={T_new[0, 2]:.2f}, dy={T_new[1, 2]:.2f}"
3159
+ f"(refine={refine_model}, final={final_model}): dx={T_new[0,2]:.2f}, dy={T_new[1,2]:.2f}"
3216
3160
  )
3217
3161
  self._increment_progress()
3218
3162
 
@@ -3247,7 +3191,7 @@ class StarRegistrationThread(QThread):
3247
3191
  preview += f" … ({len(pass_deltas)} total)"
3248
3192
  self.progress_update.emit(f"Pass {pass_index + 1} delta shifts: [{preview}]")
3249
3193
  if aligned_count:
3250
- self.progress_update.emit(f"Skipped (delta < {self.shift_tolerance:.2f}px): {aligned_count} frame(s)")
3194
+ self.progress_update.emit(f"Within tolerance ( {self.shift_tolerance:.2f}px): {aligned_count} frame(s)")
3251
3195
  return True, "Pass complete."
3252
3196
  finally:
3253
3197
  try:
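
The refinement pass above submits one `_solve_delta_job` per frame to a process pool and drains completions incrementally with `wait(..., FIRST_COMPLETED)`, with a heartbeat message when nothing finishes for a while (the pattern is visible in the removed residual path). A stripped-down sketch of that kind of drain loop, standard library only; the job function and timings are placeholders:

    import time
    from concurrent.futures import ProcessPoolExecutor, wait, FIRST_COMPLETED

    def solve_one(path):                      # placeholder for _solve_delta_job
        time.sleep(0.1)
        return path, "ok"

    def drain(paths):
        done_count, results = 0, {}
        with ProcessPoolExecutor(max_workers=4) as ex:
            pending = {ex.submit(solve_one, p): p for p in paths}   # future -> frame path
            last_beat = time.monotonic()
            while pending:
                finished, _ = wait(pending, timeout=0.6, return_when=FIRST_COMPLETED)
                now = time.monotonic()
                if not finished and (now - last_beat) > 2.0:
                    print(f"… still working ({done_count}/{len(paths)} done)")
                    last_beat = now
                for fut in finished:
                    path = pending.pop(fut)
                    results[path] = fut.result()
                    done_count += 1
                    last_beat = now
        return results

    if __name__ == "__main__":
        drain([f"frame_{i}.fit" for i in range(8)])
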
@@ -3255,7 +3199,6 @@ class StarRegistrationThread(QThread):
3255
3199
  except Exception:
3256
3200
  pass
3257
3201
 
3258
-
3259
3202
  def on_worker_result_transform(self, persistent_key, new_transform):
3260
3203
  k = os.path.normpath(persistent_key)
3261
3204
  T_new = np.array(new_transform, dtype=np.float64).reshape(2, 3)
@@ -3396,8 +3339,8 @@ class StarRegistrationThread(QThread):
3396
3339
  A = np.asarray(self.alignment_matrices.get(k, IDENTITY_2x3), dtype=np.float64)
3397
3340
 
3398
3341
  # 👉 If non-affine, we pass identity to make workers solve from scratch
3399
- if self.align_model.lower() in ("homography", "poly3", "poly4"):
3400
- A = IDENTITY_2x3.copy()
3342
+ #if self.align_model.lower() in ("homography", "poly3", "poly4"):
3343
+ # A = IDENTITY_2x3.copy()
3401
3344
 
3402
3345
  jobs.append((
3403
3346
  orig_path,
@@ -3423,6 +3366,7 @@ class StarRegistrationThread(QThread):
3423
3366
  orig_path, out_path, msg, success, drizzle = fut.result()
3424
3367
  except Exception as e:
3425
3368
  self.progress_update.emit(f"⚠️ Finalize worker crashed: {e}")
3369
+ self._increment_progress()
3426
3370
  continue
3427
3371
 
3428
3372
  if msg:
@@ -3445,6 +3389,7 @@ class StarRegistrationThread(QThread):
3445
3389
  self.drizzle_xforms[k] = (str(kind), None) # poly3/4
3446
3390
  except Exception:
3447
3391
  pass
3392
+ self._increment_progress()
3448
3393
  finally:
3449
3394
  try: shutil.rmtree(tmpdir, ignore_errors=True)
3450
3395
  except Exception as e: