setiastrosuitepro 1.6.4__py3-none-any.whl → 1.6.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of setiastrosuitepro might be problematic.

Files changed (115)
  1. setiastro/images/abeicon.svg +16 -0
  2. setiastro/images/acv_icon.png +0 -0
  3. setiastro/images/colorwheel.svg +97 -0
  4. setiastro/images/cosmic.svg +40 -0
  5. setiastro/images/cosmicsat.svg +24 -0
  6. setiastro/images/first_quarter.png +0 -0
  7. setiastro/images/full_moon.png +0 -0
  8. setiastro/images/graxpert.svg +19 -0
  9. setiastro/images/last_quarter.png +0 -0
  10. setiastro/images/linearfit.svg +32 -0
  11. setiastro/images/new_moon.png +0 -0
  12. setiastro/images/pixelmath.svg +42 -0
  13. setiastro/images/waning_crescent_1.png +0 -0
  14. setiastro/images/waning_crescent_2.png +0 -0
  15. setiastro/images/waning_crescent_3.png +0 -0
  16. setiastro/images/waning_crescent_4.png +0 -0
  17. setiastro/images/waning_crescent_5.png +0 -0
  18. setiastro/images/waning_gibbous_1.png +0 -0
  19. setiastro/images/waning_gibbous_2.png +0 -0
  20. setiastro/images/waning_gibbous_3.png +0 -0
  21. setiastro/images/waning_gibbous_4.png +0 -0
  22. setiastro/images/waning_gibbous_5.png +0 -0
  23. setiastro/images/waxing_crescent_1.png +0 -0
  24. setiastro/images/waxing_crescent_2.png +0 -0
  25. setiastro/images/waxing_crescent_3.png +0 -0
  26. setiastro/images/waxing_crescent_4.png +0 -0
  27. setiastro/images/waxing_crescent_5.png +0 -0
  28. setiastro/images/waxing_gibbous_1.png +0 -0
  29. setiastro/images/waxing_gibbous_2.png +0 -0
  30. setiastro/images/waxing_gibbous_3.png +0 -0
  31. setiastro/images/waxing_gibbous_4.png +0 -0
  32. setiastro/images/waxing_gibbous_5.png +0 -0
  33. setiastro/qml/ResourceMonitor.qml +84 -82
  34. setiastro/saspro/__main__.py +20 -1
  35. setiastro/saspro/_generated/build_info.py +2 -2
  36. setiastro/saspro/abe.py +37 -4
  37. setiastro/saspro/aberration_ai.py +237 -21
  38. setiastro/saspro/acv_exporter.py +379 -0
  39. setiastro/saspro/add_stars.py +33 -6
  40. setiastro/saspro/backgroundneutral.py +108 -40
  41. setiastro/saspro/blemish_blaster.py +4 -1
  42. setiastro/saspro/blink_comparator_pro.py +74 -24
  43. setiastro/saspro/clahe.py +4 -1
  44. setiastro/saspro/continuum_subtract.py +4 -1
  45. setiastro/saspro/convo.py +13 -7
  46. setiastro/saspro/cosmicclarity.py +129 -18
  47. setiastro/saspro/crop_dialog_pro.py +123 -7
  48. setiastro/saspro/curve_editor_pro.py +109 -42
  49. setiastro/saspro/doc_manager.py +245 -15
  50. setiastro/saspro/exoplanet_detector.py +120 -28
  51. setiastro/saspro/frequency_separation.py +1158 -204
  52. setiastro/saspro/ghs_dialog_pro.py +81 -16
  53. setiastro/saspro/graxpert.py +1 -0
  54. setiastro/saspro/gui/main_window.py +429 -228
  55. setiastro/saspro/gui/mixins/dock_mixin.py +245 -24
  56. setiastro/saspro/gui/mixins/menu_mixin.py +27 -1
  57. setiastro/saspro/gui/mixins/theme_mixin.py +160 -14
  58. setiastro/saspro/gui/mixins/toolbar_mixin.py +384 -18
  59. setiastro/saspro/gui/mixins/update_mixin.py +138 -36
  60. setiastro/saspro/gui/mixins/view_mixin.py +42 -0
  61. setiastro/saspro/halobgon.py +4 -0
  62. setiastro/saspro/histogram.py +5 -1
  63. setiastro/saspro/image_combine.py +4 -0
  64. setiastro/saspro/image_peeker_pro.py +4 -0
  65. setiastro/saspro/imageops/starbasedwhitebalance.py +23 -52
  66. setiastro/saspro/imageops/stretch.py +582 -62
  67. setiastro/saspro/isophote.py +4 -0
  68. setiastro/saspro/layers.py +13 -9
  69. setiastro/saspro/layers_dock.py +183 -3
  70. setiastro/saspro/legacy/image_manager.py +154 -20
  71. setiastro/saspro/legacy/numba_utils.py +67 -47
  72. setiastro/saspro/legacy/xisf.py +240 -98
  73. setiastro/saspro/live_stacking.py +180 -79
  74. setiastro/saspro/luminancerecombine.py +228 -27
  75. setiastro/saspro/mask_creation.py +174 -15
  76. setiastro/saspro/mfdeconv.py +113 -35
  77. setiastro/saspro/mfdeconvcudnn.py +119 -70
  78. setiastro/saspro/mfdeconvsport.py +112 -35
  79. setiastro/saspro/morphology.py +4 -0
  80. setiastro/saspro/multiscale_decomp.py +51 -12
  81. setiastro/saspro/numba_utils.py +72 -57
  82. setiastro/saspro/ops/commands.py +18 -18
  83. setiastro/saspro/ops/script_editor.py +10 -2
  84. setiastro/saspro/ops/scripts.py +122 -0
  85. setiastro/saspro/perfect_palette_picker.py +37 -3
  86. setiastro/saspro/plate_solver.py +84 -49
  87. setiastro/saspro/psf_viewer.py +119 -37
  88. setiastro/saspro/resources.py +67 -0
  89. setiastro/saspro/rgbalign.py +4 -0
  90. setiastro/saspro/selective_color.py +4 -1
  91. setiastro/saspro/sfcc.py +364 -152
  92. setiastro/saspro/shortcuts.py +160 -29
  93. setiastro/saspro/signature_insert.py +692 -33
  94. setiastro/saspro/stacking_suite.py +1331 -484
  95. setiastro/saspro/star_alignment.py +247 -123
  96. setiastro/saspro/star_spikes.py +4 -0
  97. setiastro/saspro/star_stretch.py +38 -3
  98. setiastro/saspro/stat_stretch.py +743 -128
  99. setiastro/saspro/subwindow.py +786 -360
  100. setiastro/saspro/supernovaasteroidhunter.py +1 -1
  101. setiastro/saspro/wavescale_hdr.py +4 -1
  102. setiastro/saspro/wavescalede.py +4 -1
  103. setiastro/saspro/whitebalance.py +84 -12
  104. setiastro/saspro/widgets/common_utilities.py +28 -21
  105. setiastro/saspro/widgets/resource_monitor.py +109 -59
  106. setiastro/saspro/widgets/spinboxes.py +10 -13
  107. setiastro/saspro/wimi.py +27 -656
  108. setiastro/saspro/wims.py +13 -3
  109. setiastro/saspro/xisf.py +101 -11
  110. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.6.12.dist-info}/METADATA +2 -1
  111. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.6.12.dist-info}/RECORD +115 -82
  112. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.6.12.dist-info}/WHEEL +0 -0
  113. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.6.12.dist-info}/entry_points.txt +0 -0
  114. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.6.12.dist-info}/licenses/LICENSE +0 -0
  115. {setiastrosuitepro-1.6.4.dist-info → setiastrosuitepro-1.6.12.dist-info}/licenses/license.txt +0 -0
@@ -16,6 +16,7 @@ import hashlib
16
16
  from numpy.lib.format import open_memmap
17
17
  import tzlocal
18
18
  import weakref
19
+ import ast
19
20
  import re
20
21
  import unicodedata
21
22
  import math # used in compute_safe_chunk
@@ -152,17 +153,168 @@ _WINDOWS_RESERVED = {
152
153
 
153
154
  _FITS_EXTS = ('.fits', '.fit', '.fts', '.fits.gz', '.fit.gz', '.fts.gz', '.fz')
154
155
 
156
+ def _coerce_fits_value(v):
157
+ """Convert XISF keyword 'value' strings to reasonable python scalars."""
158
+ if v is None:
159
+ return None
160
+ if isinstance(v, (int, float, bool)):
161
+ return v
162
+ s = str(v).strip()
163
+
164
+ # PixInsight often uses 'T'/'F'
165
+ if s in ("T", "TRUE", "True", "true"):
166
+ return True
167
+ if s in ("F", "FALSE", "False", "false"):
168
+ return False
169
+
170
+ # int?
171
+ try:
172
+ if s.isdigit() or (s.startswith(("+", "-")) and s[1:].isdigit()):
173
+ return int(s)
174
+ except Exception:
175
+ pass
176
+
177
+ # float?
178
+ try:
179
+ # handles "8.9669e+03", etc.
180
+ return float(s)
181
+ except Exception:
182
+ pass
183
+
184
+ # keep as string (strip surrounding quotes if present)
185
+ if (len(s) >= 2) and ((s[0] == s[-1]) and s[0] in ("'", '"')):
186
+ s = s[1:-1]
187
+ return s
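The coercion above mirrors how PixInsight serializes FITS cards as strings ('T'/'F' booleans, plain integers, scientific-notation floats, quoted strings). Below is a simplified standalone restatement with a few illustrative inputs; the sample values are made up, and integer parsing uses try/except rather than the isdigit test used above.

# Standalone sketch of the same coercion rules, for illustration only.
def coerce(v):
    if v is None or isinstance(v, (int, float, bool)):
        return v
    s = str(v).strip()
    if s in ("T", "TRUE", "True", "true"):
        return True
    if s in ("F", "FALSE", "False", "false"):
        return False
    try:
        return int(s)            # "42", "-7"
    except ValueError:
        pass
    try:
        return float(s)          # "8.9669e+03"
    except ValueError:
        pass
    if len(s) >= 2 and s[0] == s[-1] and s[0] in ("'", '"'):
        s = s[1:-1]              # strip surrounding quotes
    return s

assert coerce("T") is True
assert coerce("-7") == -7
assert abs(coerce("8.9669e+03") - 8966.9) < 1e-6
assert coerce("'Ha 3nm'") == "Ha 3nm"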
188
+
189
+
190
+ def xisf_fits_header(path: str, image_index: int = 0) -> fits.Header:
191
+ """
192
+ Extract FITS keywords from XISF file into astropy.io.fits.Header.
193
+
194
+ Your XISF structure has:
195
+ ims[0]["FITSKeywords"][KEY] = [ {"value": "...", "comment": "..."}, ... ]
196
+ Sometimes nested under ims[0]["xisf_meta"] (dict or stringified dict).
197
+ """
198
+ hdr = fits.Header()
199
+ if XISF is None:
200
+ return hdr
201
+
202
+ x = XISF(path)
203
+ ims = x.get_images_metadata() or []
204
+ if not ims:
205
+ return hdr
206
+
207
+ im = ims[min(max(image_index, 0), len(ims) - 1)]
208
+
209
+ # 1) direct
210
+ kw = im.get("FITSKeywords")
211
+
212
+ # 2) nested inside xisf_meta dict
213
+ if kw is None:
214
+ xm = im.get("xisf_meta")
215
+ if isinstance(xm, dict):
216
+ kw = xm.get("FITSKeywords")
217
+
218
+ # 3) xisf_meta stringified dict (your dump shows this exact situation)
219
+ if kw is None:
220
+ xm = im.get("xisf_meta")
221
+ if isinstance(xm, str) and "FITSKeywords" in xm:
222
+ try:
223
+ xm2 = ast.literal_eval(xm)
224
+ if isinstance(xm2, dict):
225
+ kw = xm2.get("FITSKeywords")
226
+ except Exception:
227
+ kw = None
228
+
229
+ if not isinstance(kw, dict):
230
+ return hdr
231
+
232
+ # Build header
233
+ for key, entries in kw.items():
234
+ try:
235
+ k = str(key).strip()
236
+ if not k:
237
+ continue
238
+
239
+ # entries is usually a list of dicts: [{"value": "...", "comment":"..."}]
240
+ if isinstance(entries, list) and entries:
241
+ e0 = entries[0]
242
+ if isinstance(e0, dict):
243
+ val = _coerce_fits_value(e0.get("value"))
244
+ com = e0.get("comment")
245
+ else:
246
+ val = _coerce_fits_value(e0)
247
+ com = None
248
+ elif isinstance(entries, dict):
249
+ val = _coerce_fits_value(entries.get("value"))
250
+ com = entries.get("comment")
251
+ else:
252
+ val = _coerce_fits_value(entries)
253
+ com = None
254
+
255
+ if com is not None:
256
+ hdr[k] = (val, str(com))
257
+ else:
258
+ hdr[k] = val
259
+ except Exception:
260
+ # never let one bad keyword kill header extraction
261
+ pass
262
+
263
+ return hdr
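A brief usage sketch for the extractor above. The file path is hypothetical, and the snippet assumes the optional XISF reader imported by this module is available (otherwise an empty header comes back).

# Illustration only: pull FITS-style keywords out of an XISF file and read
# a couple of common cards with header-style defaults.
path = "example_lights/NGC7000_001.xisf"        # hypothetical path

hdr = xisf_fits_header(path)                    # astropy.io.fits.Header
exposure = hdr.get("EXPTIME", hdr.get("EXPOSURE", 0.0))
filt = hdr.get("FILTER", "Unknown")
print(f"{filt=} {exposure=}")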
264
+
155
265
  def get_valid_header(path: str):
156
266
  """
157
- Fast header-only FITS peek with a targeted fallback:
267
+ Fast header-only peek with targeted fallback.
268
+
269
+ FITS/FITS-like:
270
+ 1) Header-only scan (lazy_load_hdus=True, never touches .data)
271
+ 2) If NAXIS1/2 still missing/invalid, fallback to reading ONE image HDU's data
272
+ to infer shape, then patch NAXIS/NAXIS1/NAXIS2.
158
273
 
159
- 1) Header-only scan (lazy_load_hdus=True, never touches .data)
160
- 2) If NAXIS1/2 still missing/invalid, fallback to reading ONE image HDU's data
161
- to get shape, then patch NAXIS/NAXIS1/NAXIS2.
274
+ XISF:
275
+ - Parse XML header only (no pixel decode)
276
+ - Synthesize a FITS-like header dict with keys used by stacking ingest:
277
+ NAXIS1, NAXIS2, (optional NAXIS3), EXPOSURE/EXPTIME, IMAGETYP, FILTER, etc.
162
278
 
163
- Returns: (hdr, ok_bool)
279
+ Returns: (hdr_like, ok_bool)
280
+ - hdr_like is an astropy Header for FITS, or a dict for XISF
164
281
  """
165
282
  try:
283
+ lp = (path or "").lower()
284
+
285
+ if lp.endswith(".xisf"):
286
+ from astropy.io import fits
287
+
288
+ # Grab FITS keywords from the XISF
289
+ hdr = xisf_fits_header(path)
290
+
291
+ # Still need geometry for NAXISn
292
+ x = XISF(path)
293
+ ims = x.get_images_metadata() or []
294
+ if ims:
295
+ im = ims[0]
296
+ w, h, chc = im.get("geometry", (0, 0, 0))
297
+ w = int(w or 0)
298
+ h = int(h or 0)
299
+ c = int(chc or 0)
300
+
301
+ hdr["NAXIS"] = 3 if c > 1 else 2
302
+ hdr["NAXIS1"] = w
303
+ hdr["NAXIS2"] = h
304
+ if c > 1:
305
+ hdr["NAXIS3"] = c
306
+
307
+ # Normalize exposure keyword convenience
308
+ if "EXPTIME" not in hdr and "EXPOSURE" in hdr:
309
+ hdr["EXPTIME"] = hdr["EXPOSURE"]
310
+ if "EXPOSURE" not in hdr and "EXPTIME" in hdr:
311
+ hdr["EXPOSURE"] = hdr["EXPTIME"]
312
+
313
+ return hdr, True
314
+
315
+ # ---------------------------
316
+ # FITS path (your existing logic)
317
+ # ---------------------------
166
318
  from astropy.io import fits
167
319
 
168
320
  def _is_good_dim(v):
@@ -211,18 +363,16 @@ def get_valid_header(path: str):
211
363
  if not _is_good_dim(hdr.get("NAXIS2")) and _is_good_dim(hdr.get("ZNAXIS2")):
212
364
  hdr["NAXIS2"] = int(hdr["ZNAXIS2"])
213
365
 
214
- # If we already have good dims, we are done (FAST PATH)
366
+ # FAST PATH
215
367
  if _is_good_dim(hdr.get("NAXIS1")) and _is_good_dim(hdr.get("NAXIS2")):
216
368
  return hdr, True
217
369
 
218
370
  # ---------------------------
219
371
  # Pass 2: slow fallback (ONLY if needed)
220
372
  # ---------------------------
221
- # Re-open without lazy semantics and read ONE image-like HDU's data to infer shape.
222
373
  with fits.open(path, mode="readonly", memmap=False) as hdul:
223
374
  target_hdu = None
224
375
  for hdu in hdul:
225
- # data access is expensive; try to choose wisely by header first
226
376
  naxis = hdu.header.get("NAXIS", 0)
227
377
  znaxis = hdu.header.get("ZNAXIS", 0)
228
378
 
@@ -236,10 +386,9 @@ def get_valid_header(path: str):
236
386
  if target_hdu is None:
237
387
  target_hdu = hdul[0]
238
388
 
239
- # Now (and only now) touch data
240
389
  data = getattr(target_hdu, "data", None)
241
-
242
390
  hdr2 = target_hdu.header.copy()
391
+
243
392
  if data is not None and getattr(data, "ndim", 0) >= 2:
244
393
  try:
245
394
  ny, nx = data.shape[-2], data.shape[-1]
@@ -250,12 +399,12 @@ def get_valid_header(path: str):
250
399
  except Exception:
251
400
  pass
252
401
 
253
- # If still unknown, return header anyway (caller can show "Unknown")
254
402
  return hdr2, True
255
403
 
256
404
  except Exception:
257
405
  return None, False
258
406
 
407
+
259
408
  def _read_tile_stack(file_list, y0, y1, x0, x1, channels, out_buf):
260
409
  """
261
410
  Fill `out_buf` with the tile stack for (y0:y1, x0:x1).
@@ -1566,11 +1715,20 @@ class _MMFits:
1566
1715
  raise ValueError(f"Unsupported ndim={self.ndim} for {path}")
1567
1716
 
1568
1717
  def _apply_fixed_fits_scale(self, arr: np.ndarray) -> np.ndarray:
1718
+ """
1719
+ Map 8/16-bit FITS integer samples to [0,1] using a fixed divisor.
1720
+ IMPORTANT: Only do this for integer dtypes. If Astropy already returned
1721
+ float (e.g. BSCALE/BZERO applied), do NOT divide again.
1722
+ """
1723
+ # Only scale raw integer pixel arrays
1724
+ if arr.dtype.kind not in ("u", "i"):
1725
+ return arr
1726
+
1569
1727
  bitpix = getattr(self, "_bitpix", 0)
1570
1728
  if bitpix == 8:
1571
- arr /= 255.0
1729
+ return arr / 255.0
1572
1730
  elif bitpix == 16:
1573
- arr /= 65535.0
1731
+ return arr / 65535.0
1574
1732
  return arr
1575
1733
 
1576
1734
  def read_tile(self, y0, y1, x0, x1) -> np.ndarray:
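The dtype guard above is the substantive fix: the old code divided unconditionally and in place, so float data that Astropy had already scaled via BSCALE/BZERO could be divided a second time. A minimal illustration with synthetic arrays (not tied to any real file):

import numpy as np

def fixed_scale(arr, bitpix):
    # Mirror of the fixed-divisor rule: only raw integer samples get divided.
    if arr.dtype.kind not in ("u", "i"):
        return arr                       # already float -> leave untouched
    if bitpix == 8:
        return arr / 255.0
    if bitpix == 16:
        return arr / 65535.0
    return arr

raw16 = np.array([0, 32768, 65535], dtype=np.uint16)
already_float = np.array([0.0, 0.5, 1.0], dtype=np.float32)

print(fixed_scale(raw16, 16))            # [0.0, ~0.5, 1.0]
print(fixed_scale(already_float, 16))    # unchanged, no double scaling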
@@ -1698,9 +1856,9 @@ class ReferenceFrameReviewDialog(QDialog):
1698
1856
  self.initUI()
1699
1857
  self.loadImageArray() # Load the image into self.original_image
1700
1858
  if self.original_image is not None:
1701
- self.updatePreview(self.original_image) # Ensure the first image is shown
1702
- if self.original_image is not None:
1703
- QTimer.singleShot(0, self.zoomIn)
1859
+ QTimer.singleShot(0, lambda: self.updatePreview(self.original_image, fit=True))
1860
+ #if self.original_image is not None:
1861
+ # QTimer.singleShot(0, self.zoomIn)
1704
1862
 
1705
1863
 
1706
1864
  def initUI(self):
@@ -1758,6 +1916,89 @@ class ReferenceFrameReviewDialog(QDialog):
1758
1916
  self.setLayout(main_layout)
1759
1917
  self.zoomIn()
1760
1918
 
1919
+ def _ensure_hwc(self, x: np.ndarray) -> np.ndarray:
1920
+ """Ensure HWC for RGB, HW for mono."""
1921
+ if x is None:
1922
+ return None
1923
+ x = np.asarray(x)
1924
+ # CHW -> HWC
1925
+ if x.ndim == 3 and x.shape[0] == 3 and x.shape[-1] != 3:
1926
+ x = np.transpose(x, (1, 2, 0))
1927
+ # squeeze HWC with singleton
1928
+ if x.ndim == 3 and x.shape[-1] == 1:
1929
+ x = np.squeeze(x, axis=-1)
1930
+ return x
1931
+
1932
+
1933
+ def _robust_preview_stretch(self, img: np.ndarray,
1934
+ lo_pct: float = 0.25,
1935
+ hi_pct: float = 99.75,
1936
+ gamma: float = 0.65) -> np.ndarray:
1937
+ """
1938
+ Robust preview stretch:
1939
+ - nan/inf safe
1940
+ - pedestal remove per channel (img - min)
1941
+ - percentile clip to kill outliers
1942
+ - scale to 0..1
1943
+ - gentle gamma (default <1 brightens)
1944
+ Returns float32 in [0,1] and preserves mono vs RGB.
1945
+ """
1946
+ x = self._ensure_hwc(img)
1947
+ if x is None:
1948
+ return None
1949
+
1950
+ x = np.asarray(x, dtype=np.float32)
1951
+ x = np.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0)
1952
+
1953
+ # Mono
1954
+ if x.ndim == 2:
1955
+ x = x - float(x.min())
1956
+ # percentile clip on non-flat data
1957
+ p_lo = float(np.percentile(x, lo_pct))
1958
+ p_hi = float(np.percentile(x, hi_pct))
1959
+ if p_hi > p_lo:
1960
+ x = np.clip(x, p_lo, p_hi)
1961
+ x = (x - p_lo) / (p_hi - p_lo)
1962
+ else:
1963
+ mx = float(x.max())
1964
+ if mx > 0:
1965
+ x = x / mx
1966
+ if gamma is not None and gamma > 0:
1967
+ x = np.power(np.clip(x, 0.0, 1.0), gamma)
1968
+ return np.clip(x, 0.0, 1.0).astype(np.float32, copy=False)
1969
+
1970
+ # RGB (HWC)
1971
+ if x.ndim == 3 and x.shape[2] == 3:
1972
+ out = np.empty_like(x, dtype=np.float32)
1973
+ for c in range(3):
1974
+ ch = x[..., c]
1975
+ ch = ch - float(ch.min())
1976
+ p_lo = float(np.percentile(ch, lo_pct))
1977
+ p_hi = float(np.percentile(ch, hi_pct))
1978
+ if p_hi > p_lo:
1979
+ ch = np.clip(ch, p_lo, p_hi)
1980
+ ch = (ch - p_lo) / (p_hi - p_lo)
1981
+ else:
1982
+ mx = float(ch.max())
1983
+ if mx > 0:
1984
+ ch = ch / mx
1985
+ out[..., c] = ch
1986
+
1987
+ if gamma is not None and gamma > 0:
1988
+ out = np.power(np.clip(out, 0.0, 1.0), gamma)
1989
+
1990
+ return np.clip(out, 0.0, 1.0).astype(np.float32, copy=False)
1991
+
1992
+ # Fallback: treat as scalar field
1993
+ x = x - float(x.min())
1994
+ mx = float(x.max())
1995
+ if mx > 0:
1996
+ x = x / mx
1997
+ if gamma is not None and gamma > 0:
1998
+ x = np.power(np.clip(x, 0.0, 1.0), gamma)
1999
+ return np.clip(x, 0.0, 1.0).astype(np.float32, copy=False)
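For a quick sanity check outside the dialog, the mono branch of the stretch above can be restated as a free function. The percentile and gamma defaults are copied from the method signature; the test frame is synthetic.

import numpy as np

def preview_stretch_mono(x, lo_pct=0.25, hi_pct=99.75, gamma=0.65):
    """Pedestal-remove, percentile-clip, rescale to [0,1], then gentle gamma."""
    x = np.nan_to_num(np.asarray(x, dtype=np.float32), nan=0.0, posinf=0.0, neginf=0.0)
    x = x - float(x.min())
    p_lo, p_hi = np.percentile(x, [lo_pct, hi_pct])
    if p_hi > p_lo:
        x = (np.clip(x, p_lo, p_hi) - p_lo) / (p_hi - p_lo)
    elif float(x.max()) > 0:
        x = x / float(x.max())
    return np.clip(np.power(np.clip(x, 0.0, 1.0), gamma), 0.0, 1.0)

rng = np.random.default_rng(0)
frame = rng.normal(100.0, 5.0, size=(64, 64)).astype(np.float32)
frame[10, 10] = 1e6                       # hot pixel the percentile clip should absorb
out = preview_stretch_mono(frame)
print(out.min(), out.max())               # ~0.0 .. 1.0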
2000
+
2001
+
1761
2002
  def fitToPreview(self):
1762
2003
  """Calculate and set the zoom factor so that the image fills the preview area."""
1763
2004
  if self.original_image is None:
@@ -1784,32 +2025,46 @@ class ReferenceFrameReviewDialog(QDialog):
1784
2025
 
1785
2026
  def _normalize_preview_01(self, img: np.ndarray) -> np.ndarray:
1786
2027
  """
1787
- Normalize image to [0,1] for preview/stretch:
1788
-
1789
- 1. Handle NaNs/inf safely.
1790
- 2. If min < 0 or max > 1, do (img - min) / (max - min).
1791
- 3. Always return float32 in [0,1].
2028
+ Always normalize to [0,1]:
2029
+ img = img - min(img)
2030
+ img = img / max(img)
2031
+ Per-channel if RGB, global if mono.
1792
2032
  """
1793
2033
  if img is None:
1794
2034
  return None
1795
2035
 
1796
- img = np.asarray(img, dtype=np.float32)
1797
- img = np.nan_to_num(img, nan=0.0, posinf=0.0, neginf=0.0)
1798
-
1799
- finite = np.isfinite(img)
1800
- if not finite.any():
1801
- return np.zeros_like(img, dtype=np.float32)
1802
-
1803
- mn = float(img[finite].min())
1804
- mx = float(img[finite].max())
1805
- if mx == mn:
1806
- # flat frame β†’ just zero it
1807
- return np.zeros_like(img, dtype=np.float32)
2036
+ x = np.asarray(img, dtype=np.float32)
2037
+ x = np.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0)
1808
2038
 
1809
- if mn < 0.0 or mx > 1.0:
1810
- img = (img - mn) / (mx - mn)
2039
+ if x.ndim == 2:
2040
+ mn = float(x.min())
2041
+ x = x - mn
2042
+ mx = float(x.max())
2043
+ if mx > 0:
2044
+ x = x / mx
2045
+ return np.clip(x, 0.0, 1.0).astype(np.float32, copy=False)
2046
+
2047
+ if x.ndim == 3 and x.shape[2] == 3:
2048
+ # per-channel pedestal remove + normalize
2049
+ out = x.copy()
2050
+ for c in range(3):
2051
+ ch = out[..., c]
2052
+ mn = float(ch.min())
2053
+ ch = ch - mn
2054
+ mx = float(ch.max())
2055
+ if mx > 0:
2056
+ ch = ch / mx
2057
+ out[..., c] = ch
2058
+ return np.clip(out, 0.0, 1.0).astype(np.float32, copy=False)
2059
+
2060
+ # fallback
2061
+ mn = float(x.min())
2062
+ x = x - mn
2063
+ mx = float(x.max())
2064
+ if mx > 0:
2065
+ x = x / mx
2066
+ return np.clip(x, 0.0, 1.0).astype(np.float32, copy=False)
1811
2067
 
1812
- return np.clip(img, 0.0, 1.0)
1813
2068
 
1814
2069
 
1815
2070
  def loadImageArray(self):
@@ -1832,22 +2087,44 @@ class ReferenceFrameReviewDialog(QDialog):
1832
2087
 
1833
2088
  self.original_image = img
1834
2089
 
2090
+ def _fit_zoom_to_viewport(self, image: np.ndarray):
2091
+ """Set zoom_factor so image fits inside the scrollArea viewport."""
2092
+ if image is None:
2093
+ return
2094
+
2095
+ img = self._ensure_hwc(image)
2096
+
2097
+ if img.ndim == 2:
2098
+ h, w = img.shape
2099
+ elif img.ndim == 3 and img.shape[2] == 3:
2100
+ h, w = img.shape[:2]
2101
+ else:
2102
+ return
2103
+
2104
+ vp = self.scrollArea.viewport().size()
2105
+ if vp.width() <= 0 or vp.height() <= 0 or w <= 0 or h <= 0:
2106
+ return
2107
+
2108
+ # Fit-to-viewport zoom
2109
+ self.zoom_factor = min(vp.width() / w, vp.height() / h)
1835
2110
 
1836
- def updatePreview(self, image):
1837
- """
1838
- Convert a given image array to a QPixmap and update the preview label.
1839
- """
2111
+ def updatePreview(self, image, *, fit: bool = False):
1840
2112
  self.current_preview_image = image
2113
+
2114
+ if fit:
2115
+ self._fit_zoom_to_viewport(image)
2116
+
1841
2117
  pixmap = self.convertArrayToPixmap(image)
1842
2118
  if pixmap is None or pixmap.isNull():
1843
2119
  self.previewLabel.setText(self.tr("Unable to load preview."))
1844
- else:
1845
- available_size = self.scrollArea.viewport().size()
1846
- new_size = QSize(int(available_size.width() * self.zoom_factor),
1847
- int(available_size.height() * self.zoom_factor))
1848
- scaled_pixmap = pixmap.scaled(new_size, Qt.AspectRatioMode.KeepAspectRatio,
1849
- Qt.TransformationMode.SmoothTransformation)
1850
- self.previewLabel.setPixmap(scaled_pixmap)
2120
+ return
2121
+
2122
+ scaled = pixmap.scaled(
2123
+ pixmap.size() * self.zoom_factor,
2124
+ Qt.AspectRatioMode.KeepAspectRatio,
2125
+ Qt.TransformationMode.SmoothTransformation
2126
+ )
2127
+ self.previewLabel.setPixmap(scaled)
1851
2128
 
1852
2129
  def _preview_boost(self, img: np.ndarray) -> np.ndarray:
1853
2130
  """Robust, very gentle stretch for display when image would quantize to black."""
@@ -1865,62 +2142,48 @@ class ReferenceFrameReviewDialog(QDialog):
1865
2142
  if image is None:
1866
2143
  return None
1867
2144
 
1868
- img = image.astype(np.float32, copy=False)
1869
-
1870
- # If image is so dim or flat that 8-bit will zero-out, boost for preview
1871
- ptp = float(img.max() - img.min())
1872
- needs_boost = (float(img.max()) <= (1.0 / 255.0)) or (ptp < 1e-6) or (not np.isfinite(img).all())
1873
- if needs_boost:
1874
- img = self._preview_boost(np.nan_to_num(img, nan=0.0, posinf=0.0, neginf=0.0))
2145
+ # ALWAYS normalize to [0,1]
2146
+ img = self._normalize_preview_01(image)
1875
2147
 
1876
2148
  # Convert to 8-bit for QImage
1877
2149
  display_image = (img * 255.0).clip(0, 255).astype(np.uint8)
1878
2150
 
2151
+ # IMPORTANT: ensure contiguous memory
2152
+ display_image = np.ascontiguousarray(display_image)
2153
+
2154
+ # Keep a reference so Qt's QImage always has valid backing memory
2155
+ self._last_preview_u8 = display_image
2156
+
1879
2157
  if display_image.ndim == 2:
1880
2158
  h, w = display_image.shape
1881
- q_image = QImage(display_image.data, w, h, w, QImage.Format.Format_Grayscale8)
2159
+ q_image = QImage(self._last_preview_u8.data, w, h, w, QImage.Format.Format_Grayscale8)
2160
+ q_image = q_image.copy() # detach from numpy buffer (extra safety)
1882
2161
  elif display_image.ndim == 3 and display_image.shape[2] == 3:
1883
2162
  h, w, _ = display_image.shape
1884
- q_image = QImage(display_image.data, w, h, 3 * w, QImage.Format.Format_RGB888)
2163
+ q_image = QImage(self._last_preview_u8.data, w, h, 3 * w, QImage.Format.Format_RGB888)
2164
+ q_image = q_image.copy() # detach
1885
2165
  else:
1886
2166
  return None
2167
+
1887
2168
  return QPixmap.fromImage(q_image)
1888
-
2169
+
1889
2170
  def toggleAutostretch(self):
1890
2171
  if self.original_image is None:
1891
2172
  QMessageBox.warning(self, self.tr("Error"), self.tr("Reference image not loaded."))
1892
2173
  return
1893
2174
 
1894
- # 🔹 Ensure the image we feed to Statistical Stretch is in [0,1]
1895
- base = self._normalize_preview_01(self.original_image)
1896
-
1897
2175
  self.autostretch_enabled = not self.autostretch_enabled
2176
+
1898
2177
  if self.autostretch_enabled:
1899
- if base.ndim == 2:
1900
- new_image = stretch_mono_image(
1901
- base,
1902
- target_median=0.3,
1903
- normalize=True,
1904
- apply_curves=False
1905
- )
1906
- elif base.ndim == 3 and base.shape[2] == 3:
1907
- new_image = stretch_color_image(
1908
- base,
1909
- target_median=0.3,
1910
- linked=False,
1911
- normalize=True,
1912
- apply_curves=False
1913
- )
1914
- else:
1915
- new_image = base
2178
+ new_image = self._robust_preview_stretch(self.original_image)
1916
2179
  self.toggleAutoStretchButton.setText(self.tr("Disable Autostretch"))
1917
2180
  else:
1918
- new_image = base
2181
+ new_image = self._normalize_preview_01(self.original_image)
1919
2182
  self.toggleAutoStretchButton.setText(self.tr("Enable Autostretch"))
1920
2183
 
1921
- self.updatePreview(new_image)
2184
+ self.updatePreview(new_image, fit=True)
1922
2185
 
1923
-
2186
+
1924
2187
  def zoomIn(self):
1925
2188
  self.zoom_factor *= 1.2
1926
2189
  if self.current_preview_image is not None:
@@ -3383,7 +3646,8 @@ class _MMImage:
3383
3646
  self._orig_dtype = None
3384
3647
  self._color_axis = None
3385
3648
  self._spat_axes = (0, 1)
3386
-
3649
+ self._dbg = bool(os.environ.get("SASPRO_MMIMAGE_DEBUG", "0") == "1")
3650
+ self._dbg_count = 0
3387
3651
  self._xisf = None
3388
3652
  self._xisf_memmap = None # np.memmap when possible
3389
3653
  self._xisf_arr = None # decompressed ndarray when needed
@@ -3405,6 +3669,11 @@ class _MMImage:
3405
3669
  self._open_fits(path)
3406
3670
  self._kind = "fits"
3407
3671
 
3672
+ def _dbg_log(self, msg: str):
3673
+ if not getattr(self, "_dbg", False):
3674
+ return
3675
+ print(msg) # or your logger
3676
+
3408
3677
  # ---------------- FITS ----------------
3409
3678
  def _open_fits(self, path: str):
3410
3679
  """
@@ -3510,17 +3779,27 @@ class _MMImage:
3510
3779
  # ---------------- common API ----------------
3511
3780
  def _apply_fixed_fits_scale(self, arr: np.ndarray) -> np.ndarray:
3512
3781
  """
3513
- Map 8/16-bit FITS (already BZERO/BSCALE-scaled by Astropy) to [0,1]
3514
- using a fixed divisor. No per-frame img/max(img) normalization.
3782
+ Map 8/16-bit FITS integer samples to [0,1] using a fixed divisor.
3783
+ IMPORTANT: Only do this for integer dtypes. If Astropy already returned
3784
+ float (e.g. BSCALE/BZERO applied), do NOT divide again.
3515
3785
  """
3786
+ # Only scale raw integer pixel arrays
3787
+ if arr.dtype.kind not in ("u", "i"):
3788
+ return arr
3789
+
3516
3790
  bitpix = getattr(self, "_bitpix", 0)
3517
3791
  if bitpix == 8:
3518
- arr /= 255.0
3792
+ return arr / 255.0
3519
3793
  elif bitpix == 16:
3520
- arr /= 65535.0
3794
+ return arr / 65535.0
3521
3795
  return arr
3522
3796
 
3797
+
3523
3798
  def read_tile(self, y0, y1, x0, x1) -> np.ndarray:
3799
+ import os
3800
+ import numpy as np
3801
+
3802
+ # ---- FITS / XISF tile read (unchanged) ----
3524
3803
  if self._kind == "fits":
3525
3804
  d = self._fits_data
3526
3805
  if self.ndim == 2:
@@ -3534,23 +3813,23 @@ class _MMImage:
3534
3813
  tile = np.moveaxis(tile, self._color_axis, -1)
3535
3814
  else:
3536
3815
  if self._xisf_memmap is not None:
3537
- # memmapped (C,H,W) β†’ slice, then move to (H,W,C)
3538
3816
  C = 1 if self.ndim == 2 else self.shape[2]
3539
3817
  if C == 1:
3540
3818
  tile = self._xisf_memmap[0, y0:y1, x0:x1]
3541
3819
  else:
3542
- tile = np.moveaxis(
3543
- self._xisf_memmap[:, y0:y1, x0:x1], 0, -1
3544
- )
3820
+ tile = np.moveaxis(self._xisf_memmap[:, y0:y1, x0:x1], 0, -1)
3545
3821
  else:
3546
3822
  tile = self._xisf_arr[y0:y1, x0:x1]
3547
3823
 
3548
- # Cast to float32
3824
+ # Cast to float32 copy (what you actually feed the stacker)
3549
3825
  out = np.array(tile, dtype=np.float32, copy=True, order="C")
3550
3826
 
3551
- # For FITS, apply fixed 8/16-bit normalization
3827
+
3828
+ # ---- APPLY FIXED SCALE (your real suspect) ----
3552
3829
  if self._kind == "fits":
3553
- out = self._apply_fixed_fits_scale(out)
3830
+ out2 = self._apply_fixed_fits_scale(out)
3831
+
3832
+ out = out2
3554
3833
 
3555
3834
  # ensure (h,w,3) or (h,w)
3556
3835
  if out.ndim == 3 and out.shape[-1] not in (1, 3):
@@ -3558,6 +3837,7 @@ class _MMImage:
3558
3837
  out = np.moveaxis(out, 0, -1)
3559
3838
  if out.ndim == 3 and out.shape[-1] == 1:
3560
3839
  out = np.squeeze(out, axis=-1)
3840
+
3561
3841
  return out
3562
3842
 
3563
3843
  def read_full(self) -> np.ndarray:
@@ -3948,6 +4228,175 @@ def _bias_to_match_light(light_data, master_bias):
3948
4228
  return b[:, :, 0][None, :, :] # (H,W,1) -> (1,H,W)
3949
4229
  return b
3950
4230
 
4231
+ def _read_center_patch_via_mmimage(path: str, y0: int, y1: int, x0: int, x1: int):
4232
+ src = _MMImage(path)
4233
+ try:
4234
+ sub = src.read_tile(y0, y1, x0, x1)
4235
+ return sub
4236
+ finally:
4237
+ try:
4238
+ src.close()
4239
+ except Exception:
4240
+ pass
4241
+
4242
+ def _get_key_float(hdr: fits.Header, key: str):
4243
+ try:
4244
+ v = hdr.get(key, None)
4245
+ if v is None:
4246
+ return None
4247
+ # handle strings like "-10.0" or "-10 C"
4248
+ if isinstance(v, str):
4249
+ v = v.strip().replace("C", "").replace("Β°", "").strip()
4250
+ return float(v)
4251
+ except Exception:
4252
+ return None
4253
+
4254
+ def _collect_temp_stats(file_list: list[str]):
4255
+ ccd = []
4256
+ setp = []
4257
+ n_ccd = 0
4258
+ n_set = 0
4259
+
4260
+ for p in file_list:
4261
+ try:
4262
+ hdr = fits.getheader(p, memmap=True)
4263
+ except Exception:
4264
+ continue
4265
+
4266
+ v1 = _get_key_float(hdr, "CCD-TEMP")
4267
+ v2 = _get_key_float(hdr, "SET-TEMP")
4268
+
4269
+ if v1 is not None:
4270
+ ccd.append(v1); n_ccd += 1
4271
+ if v2 is not None:
4272
+ setp.append(v2); n_set += 1
4273
+
4274
+ def _stats(arr):
4275
+ if not arr:
4276
+ return None, None, None, None
4277
+ a = np.asarray(arr, dtype=np.float32)
4278
+ return float(np.median(a)), float(np.min(a)), float(np.max(a)), float(np.std(a))
4279
+
4280
+ c_med, c_min, c_max, c_std = _stats(ccd)
4281
+ s_med, s_min, s_max, s_std = _stats(setp)
4282
+
4283
+ return {
4284
+ "ccd_med": c_med, "ccd_min": c_min, "ccd_max": c_max, "ccd_std": c_std, "ccd_n": n_ccd,
4285
+ "set_med": s_med, "set_min": s_min, "set_max": s_max, "set_std": s_std, "set_n": n_set,
4286
+ "n_files": len(file_list),
4287
+ }
4288
+
4289
+ def _temp_to_stem_tag(temp_c: float, *, prefix: str = "") -> str:
4290
+ """
4291
+ Filename-safe temperature token:
4292
+ -10.0 -> 'm10p0C'
4293
+ +5.25 -> 'p5p3C' (rounded to 0.1C if you pass that in)
4294
+ Uses:
4295
+ m = minus, p = plus/decimal separator
4296
+ Never produces '_-' which your _normalize_master_stem would collapse.
4297
+ """
4298
+ try:
4299
+ t = float(temp_c)
4300
+ except Exception:
4301
+ return ""
4302
+
4303
+ sign = "m" if t < 0 else "p"
4304
+ t_abs = abs(t)
4305
+
4306
+ # keep one decimal place (match your earlier plan)
4307
+ s = f"{t_abs:.1f}" # e.g. "10.0"
4308
+ s = s.replace(".", "p") # e.g. "10p0"
4309
+ return f"{prefix}{sign}{s}C"
4310
+
4311
+
4312
+ def _arr_stats(a: np.ndarray):
4313
+ a = np.asarray(a)
4314
+ fin = np.isfinite(a)
4315
+ if fin.any():
4316
+ v = a[fin]
4317
+ return dict(
4318
+ dtype=str(a.dtype),
4319
+ shape=tuple(a.shape),
4320
+ finite=int(fin.sum()),
4321
+ nan=int(np.isnan(a).sum()),
4322
+ inf=int(np.isinf(a).sum()),
4323
+ min=float(v.min()),
4324
+ max=float(v.max()),
4325
+ p01=float(np.percentile(v, 1)),
4326
+ p50=float(np.percentile(v, 50)),
4327
+ p99=float(np.percentile(v, 99)),
4328
+ mean=float(v.mean()),
4329
+ )
4330
+ return dict(dtype=str(a.dtype), shape=tuple(a.shape), finite=0, nan=int(np.isnan(a).sum()), inf=int(np.isinf(a).sum()))
4331
+
4332
+ def _print_stats(tag: str, a: np.ndarray, *, bit_depth=None, hdr=None):
4333
+ s = _arr_stats(a)
4334
+ bd = f", bit_depth={bit_depth}" if bit_depth is not None else ""
4335
+ print(f"πŸ§ͺ {tag}{bd} dtype={s['dtype']} shape={s['shape']} finite={s['finite']} nan={s['nan']} inf={s['inf']}")
4336
+ if s["finite"] > 0:
4337
+ print(f" min={s['min']:.6f} p01={s['p01']:.6f} p50={s['p50']:.6f} p99={s['p99']:.6f} max={s['max']:.6f} mean={s['mean']:.6f}")
4338
+ # Header hints (best-effort)
4339
+ if hdr is not None:
4340
+ try:
4341
+ # FITS-ish
4342
+ if hasattr(hdr, "get"):
4343
+ print(f" hdr: BITPIX={hdr.get('BITPIX', 'NA')} BSCALE={hdr.get('BSCALE', 'NA')} BZERO={hdr.get('BZERO', 'NA')}")
4344
+ except Exception:
4345
+ pass
4346
+
4347
+ def _warn_if_units_mismatch(light: np.ndarray, dark: np.ndarray | None, flat: np.ndarray | None):
4348
+ # Heuristic: if one is ~0..1 and another is hundreds/thousands, you've got mixed scaling.
4349
+ def _range_kind(a):
4350
+ if a is None:
4351
+ return None
4352
+ fin = np.isfinite(a)
4353
+ if not fin.any():
4354
+ return None
4355
+ mx = float(np.max(a[fin]))
4356
+ mn = float(np.min(a[fin]))
4357
+ return (mn, mx)
4358
+
4359
+ lr = _range_kind(light)
4360
+ dr = _range_kind(dark)
4361
+ fr = _range_kind(flat)
4362
+
4363
+ def _is_01(r):
4364
+ if r is None: return False
4365
+ mn, mx = r
4366
+ return mx <= 2.5 and mn >= -0.5
4367
+
4368
+ def _is_aduish(r):
4369
+ if r is None: return False
4370
+ mn, mx = r
4371
+ return mx >= 50.0 # conservative
4372
+
4373
+ if lr and dr and _is_01(lr) and _is_aduish(dr):
4374
+ print("🚨 UNITS MISMATCH: light looks ~0–1, but dark looks like ADU (tens/hundreds/thousands). Expect huge negatives after subtraction.")
4375
+ if lr and fr and _is_01(lr) and _is_aduish(fr):
4376
+ print("🚨 UNITS MISMATCH: light looks ~0–1, but flat looks like ADU. Flat division will be wrong unless normalized to ~1 first.")
4377
+
4378
+ def _maybe_normalize_16bit_float(a: np.ndarray, *, name: str = "") -> np.ndarray:
4379
+ """
4380
+ Fast guard:
4381
+ - If float array has max > 10, assume it's really 16-bit ADU data stored as float,
4382
+ and normalize to 0..1 by dividing by 65535.
4383
+ """
4384
+ if a is None:
4385
+ return a
4386
+ if not np.issubdtype(a.dtype, np.floating):
4387
+ return a
4388
+
4389
+ fin = np.isfinite(a)
4390
+ if not fin.any():
4391
+ return a
4392
+
4393
+ mx = float(a[fin].max()) # fast reduction
4394
+
4395
+ if mx > 10.0:
4396
+ print(f"πŸ›‘οΈ Units-guard: {name or 'array'} max={mx:.3f} (>10). Assuming 16-bit ADU-in-float; normalizing /65535.")
4397
+ return (a / 65535.0).astype(np.float32, copy=False)
4398
+
4399
+ return a
3951
4400
 
3952
4401
  class StackingSuiteDialog(QDialog):
3953
4402
  requestRelaunch = pyqtSignal(str, str) # old_dir, new_dir
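Taken together, the units-guard helpers added above (_warn_if_units_mismatch and _maybe_normalize_16bit_float) are a heuristic check against mixing [0,1]-scaled lights with ADU-scaled masters during calibration. A self-contained sketch of the same guard follows, with the 10.0 threshold and 65535 divisor copied from the code above, synthetic data, and a simplified log message.

import numpy as np

def units_guard(a, name=""):
    """If a float array looks like 16-bit ADU (max > 10), rescale it to [0,1]."""
    if a is None or not np.issubdtype(a.dtype, np.floating):
        return a
    fin = np.isfinite(a)
    if not fin.any():
        return a
    mx = float(a[fin].max())
    if mx > 10.0:
        print(f"Units-guard: {name or 'array'} max={mx:.3f} (>10), dividing by 65535.")
        return (a / 65535.0).astype(np.float32, copy=False)
    return a

light = np.full((4, 4), 0.02, dtype=np.float32)     # already 0..1
dark = np.full((4, 4), 512.0, dtype=np.float32)     # ADU stored as float
dark = units_guard(dark, "master dark")             # rescaled to ~0.0078
print((light - dark).min())                         # no huge negatives anymore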
@@ -4087,7 +4536,7 @@ class StackingSuiteDialog(QDialog):
4087
4536
  self.image_integration_tab = self.create_image_registration_tab()
4088
4537
 
4089
4538
  # Add tabs
4090
- self.tabs.addTab(self.conversion_tab, self.tr("Convert Non-FITS Formats"))
4539
+ self.tabs.addTab(self.conversion_tab, self.tr("Convert Camera RAW/TIFF Formats"))
4091
4540
  self.tabs.addTab(self.dark_tab, self.tr("Darks"))
4092
4541
  self.tabs.addTab(self.flat_tab, self.tr("Flats"))
4093
4542
  self.tabs.addTab(self.light_tab, self.tr("Lights"))
@@ -6500,6 +6949,22 @@ class StackingSuiteDialog(QDialog):
6500
6949
 
6501
6950
  return tab
6502
6951
 
6952
+ def _bucket_temp(self, t: float | None, step: float = 3.0) -> float | None:
6953
+ """Round to stable bucket. Example: -10.2 -> -10.0 when step=1.0"""
6954
+ if t is None:
6955
+ return None
6956
+ try:
6957
+ return round(float(t) / float(step)) * float(step)
6958
+ except Exception:
6959
+ return None
6960
+
6961
+ def _temp_label(self, t: float | None, step: float = 1.0) -> str:
6962
+ if t is None:
6963
+ return "Temp: Unknown"
6964
+ # show fewer decimals if step is 1.0
6965
+ return f"Temp: {t:+.0f}C" if step >= 1.0 else f"Temp: {t:+.1f}C"
6966
+
6967
+
6503
6968
  def _tree_for_type(self, t: str):
6504
6969
  t = (t or "").upper()
6505
6970
  if t == "LIGHT": return getattr(self, "light_tree", None)
@@ -8162,13 +8627,18 @@ class StackingSuiteDialog(QDialog):
8162
8627
  mf_row3.addWidget(self.mf_Huber_hint)
8163
8628
 
8164
8629
  mf_row3.addSpacing(16)
8630
+
8165
8631
  self.mf_use_star_mask_cb = QCheckBox(self.tr("Auto Star Mask"))
8166
8632
  self.mf_use_noise_map_cb = QCheckBox(self.tr("Auto Noise Map"))
8167
- self.mf_use_star_mask_cb.setChecked(self.settings.value("stacking/mfdeconv/use_star_masks", False, type=bool))
8168
- self.mf_use_noise_map_cb.setChecked(self.settings.value("stacking/mfdeconv/use_noise_maps", False, type=bool))
8633
+
8634
+ # Always ON by default (session-only toggles)
8635
+ self.mf_use_star_mask_cb.setChecked(True)
8636
+ self.mf_use_noise_map_cb.setChecked(True)
8637
+
8169
8638
  mf_row3.addWidget(self.mf_use_star_mask_cb)
8170
8639
  mf_row3.addWidget(self.mf_use_noise_map_cb)
8171
8640
  mf_row3.addStretch(1)
8641
+
8172
8642
  mf_v.addLayout(mf_row3)
8173
8643
 
8174
8644
  # persist
@@ -9653,7 +10123,10 @@ class StackingSuiteDialog(QDialog):
9653
10123
  def load_master_dark(self):
9654
10124
  """ Loads a Master Dark and updates the UI. """
9655
10125
  last_dir = self.settings.value("last_opened_folder", "", type=str) # Get last folder
9656
- files, _ = QFileDialog.getOpenFileNames(self, "Select Master Dark", last_dir, "FITS Files (*.fits *.fit)")
10126
+ files, _ = QFileDialog.getOpenFileNames(
10127
+ self, "Select Master Dark", last_dir,
10128
+ "Master Calibration (*.fits *.fit *.xisf);;All Files (*)"
10129
+ )
9657
10130
 
9658
10131
  if files:
9659
10132
  self.settings.setValue("last_opened_folder", os.path.dirname(files[0])) # Save last used folder
@@ -9668,7 +10141,10 @@ class StackingSuiteDialog(QDialog):
9668
10141
 
9669
10142
  def load_master_flat(self):
9670
10143
  last_dir = self.settings.value("last_opened_folder", "", type=str)
9671
- files, _ = QFileDialog.getOpenFileNames(self, "Select Master Flat", last_dir, "FITS Files (*.fits *.fit)")
10144
+ files, _ = QFileDialog.getOpenFileNames(
10145
+ self, "Select Master Flat", last_dir,
10146
+ "Master Calibration (*.fits *.fit *.xisf);;All Files (*)"
10147
+ )
9672
10148
 
9673
10149
  if files:
9674
10150
  self.settings.setValue("last_opened_folder", os.path.dirname(files[0]))
@@ -9681,7 +10157,7 @@ class StackingSuiteDialog(QDialog):
9681
10157
  last_dir = self.settings.value("last_opened_folder", "", type=str)
9682
10158
  files, _ = QFileDialog.getOpenFileNames(
9683
10159
  self, title, last_dir,
9684
- "FITS Files (*.fits *.fit *.fts *.fits.gz *.fit.gz *.fz)"
10160
+ "Images (*.fits *.fit *.fts *.fits.gz *.fit.gz *.fz *.xisf);;All Files (*)"
9685
10161
  )
9686
10162
  if not files:
9687
10163
  return
@@ -9760,7 +10236,7 @@ class StackingSuiteDialog(QDialog):
9760
10236
 
9761
10237
  # --- Directory walking ---------------------------------------------------------
9762
10238
  def _collect_fits_paths(self, root: str, recursive: bool = True) -> list[str]:
9763
- exts = (".fits", ".fit", ".fts", ".fits.gz", ".fit.gz", ".fz")
10239
+ exts = (".fits", ".fit", ".fts", ".fits.gz", ".fit.gz", ".fz", ".xisf")
9764
10240
  paths = []
9765
10241
  if recursive:
9766
10242
  for d, _subdirs, files in os.walk(root):
@@ -10203,14 +10679,14 @@ class StackingSuiteDialog(QDialog):
10203
10679
  try:
10204
10680
  expected_type_u = (expected_type or "").upper()
10205
10681
 
10206
- # Ensure caches exist
10207
10682
  if not hasattr(self, "_mismatch_policy") or self._mismatch_policy is None:
10208
10683
  self._mismatch_policy = {}
10209
10684
  if not hasattr(self, "session_tags") or self.session_tags is None:
10210
10685
  self.session_tags = {}
10211
10686
 
10212
- # --- Read header only (fast) ---
10213
- header, _ = get_valid_header(path)
10687
+ header, ok = get_valid_header(path)
10688
+ if not ok or header is None:
10689
+ raise RuntimeError("Header read failed")
10214
10690
 
10215
10691
  # --- Basic image size ---
10216
10692
  try:
@@ -10218,7 +10694,9 @@ class StackingSuiteDialog(QDialog):
10218
10694
  height = int(header.get("NAXIS2", 0))
10219
10695
  image_size = f"{width}x{height}" if (width > 0 and height > 0) else "Unknown"
10220
10696
  except Exception as e:
10221
- self.update_status(self.tr(f"Warning: Could not read dimensions for {os.path.basename(path)}: {e}"))
10697
+ self.update_status(self.tr(
10698
+ f"Warning: Could not read dimensions for {os.path.basename(path)}: {e}"
10699
+ ))
10222
10700
  width = height = None
10223
10701
  image_size = "Unknown"
10224
10702
 
@@ -10235,7 +10713,6 @@ class StackingSuiteDialog(QDialog):
10235
10713
  exposure_text = f"{fexp:g}s"
10236
10714
  except Exception:
10237
10715
  exposure_text = str(exp_val)
10238
-
10239
10716
  # --- Mismatch prompt (redirect/keep/skip with 'apply to all') ---
10240
10717
  if expected_type_u == "DARK":
10241
10718
  forbidden = ["light", "flat"]
@@ -10308,39 +10785,97 @@ class StackingSuiteDialog(QDialog):
10308
10785
 
10309
10786
  # --- Resolve session tag (auto vs keyword-driven) ---
10310
10787
  auto_session = self.settings.value("stacking/auto_session", True, type=bool)
10311
-
10312
10788
  if auto_session:
10313
10789
  session_tag = self._auto_session_from_path(path, header) or "Default"
10314
10790
  else:
10315
- # NOTE: this is a keyword now, not a literal session name
10316
10791
  keyword = self.settings.value("stacking/session_keyword", "Default", type=str)
10317
10792
  session_tag = self._session_from_manual_keyword(path, keyword) or "Default"
10318
10793
 
10319
- # --- Filter name normalization ---
10320
- filter_name_raw = header.get("FILTER", "Unknown")
10321
- filter_name = self._sanitize_name(filter_name_raw)
10794
+ # --- Temperature (fast: header already loaded) ---
10795
+ ccd_temp = header.get("CCD-TEMP", None)
10796
+ set_temp = header.get("SET-TEMP", None)
10797
+
10798
+ def _to_float_temp(v):
10799
+ try:
10800
+ if v is None:
10801
+ return None
10802
+ if isinstance(v, (int, float)):
10803
+ return float(v)
10804
+ s = str(v).strip()
10805
+ s = s.replace("Β°", "").replace("C", "").replace("c", "").strip()
10806
+ return float(s)
10807
+ except Exception:
10808
+ return None
10809
+
10810
+ ccd_temp_f = _to_float_temp(ccd_temp)
10811
+ set_temp_f = _to_float_temp(set_temp)
10812
+ use_temp_f = ccd_temp_f if ccd_temp_f is not None else set_temp_f
10813
+
10814
+ # --- Common metadata string for leaf rows ---
10815
+ meta_text = f"Size: {image_size} | Session: {session_tag}"
10816
+ if use_temp_f is not None:
10817
+ meta_text += f" | Temp: {use_temp_f:.1f}C"
10818
+ if set_temp_f is not None:
10819
+ meta_text += f" (Set: {set_temp_f:.1f}C)"
10322
10820
 
10323
10821
  # --- Common metadata string for leaf rows ---
10324
10822
  meta_text = f"Size: {image_size} | Session: {session_tag}"
10325
10823
 
10326
10824
  # === DARKs ===
10327
10825
  if expected_type_u == "DARK":
10328
- key = f"{exposure_text} ({image_size})"
10329
- self.dark_files.setdefault(key, []).append(path)
10826
+ # --- temperature for grouping (prefer CCD-TEMP else SET-TEMP) ---
10827
+ ccd_t = _get_key_float(header, "CCD-TEMP")
10828
+ set_t = _get_key_float(header, "SET-TEMP")
10829
+ chosen_t = ccd_t if ccd_t is not None else set_t
10830
+
10831
+ temp_step = self.settings.value("stacking/temp_group_step", 1.0, type=float)
10832
+ temp_bucket = self._bucket_temp(chosen_t, step=temp_step)
10833
+ temp_label = self._temp_label(temp_bucket, step=temp_step)
10834
+
10835
+ # --- tree grouping: exposure/size -> temp bucket -> files ---
10836
+ base_key = f"{exposure_text} ({image_size})"
10330
10837
 
10331
- exposure_item = self._dark_group_item.get(key)
10838
+ # ensure caches exist
10839
+ if not hasattr(self, "_dark_group_item") or self._dark_group_item is None:
10840
+ self._dark_group_item = {}
10841
+ if not hasattr(self, "_dark_temp_item") or self._dark_temp_item is None:
10842
+ self._dark_temp_item = {} # (base_key, temp_label) -> QTreeWidgetItem
10843
+
10844
+ # top-level exposure group
10845
+ exposure_item = self._dark_group_item.get(base_key)
10332
10846
  if exposure_item is None:
10333
- exposure_item = QTreeWidgetItem([key])
10847
+ exposure_item = QTreeWidgetItem([base_key, ""])
10334
10848
  tree.addTopLevelItem(exposure_item)
10335
- self._dark_group_item[key] = exposure_item
10336
-
10337
- leaf = QTreeWidgetItem([os.path.basename(path), meta_text])
10849
+ self._dark_group_item[base_key] = exposure_item
10850
+
10851
+ # second-level temp group under that exposure group
10852
+ temp_key = (base_key, temp_label)
10853
+ temp_item = self._dark_temp_item.get(temp_key)
10854
+ if temp_item is None:
10855
+ temp_item = QTreeWidgetItem([temp_label, ""])
10856
+ exposure_item.addChild(temp_item)
10857
+ self._dark_temp_item[temp_key] = temp_item
10858
+
10859
+ # --- store in dict for stacking ---
10860
+ # Key includes session + temp bucket so create_master_dark can split properly.
10861
+ # (We keep compatibility: your create_master_dark already handles tuple keys.)
10862
+ composite_key = (base_key, session_tag, temp_bucket)
10863
+ self.dark_files.setdefault(composite_key, []).append(path)
10864
+
10865
+ # --- leaf row ---
10866
+ # Also add temp info to metadata text so user can see it per file
10867
+ meta_text_dark = f"Size: {image_size} | Session: {session_tag} | {temp_label}"
10868
+ leaf = QTreeWidgetItem([os.path.basename(path), meta_text_dark])
10338
10869
  leaf.setData(0, Qt.ItemDataRole.UserRole, path)
10339
10870
  leaf.setData(0, Qt.ItemDataRole.UserRole + 1, session_tag)
10340
- exposure_item.addChild(leaf)
10871
+ leaf.setData(0, Qt.ItemDataRole.UserRole + 2, temp_bucket) # handy later
10872
+ temp_item.addChild(leaf)
10341
10873
 
10342
10874
  # === FLATs ===
10343
10875
  elif expected_type_u == "FLAT":
10876
+ filter_name_raw = header.get("FILTER") or "Unknown"
10877
+ filter_name = self._sanitize_name(filter_name_raw)
10878
+
10344
10879
  flat_key = f"{filter_name} - {exposure_text} ({image_size})"
10345
10880
  composite_key = (flat_key, session_tag)
10346
10881
  self.flat_files.setdefault(composite_key, []).append(path)
@@ -10368,12 +10903,14 @@ class StackingSuiteDialog(QDialog):
10368
10903
 
10369
10904
  # === LIGHTs ===
10370
10905
  elif expected_type_u == "LIGHT":
10906
+ filter_name_raw = header.get("FILTER") or "Unknown"
10907
+ filter_name = self._sanitize_name(filter_name_raw)
10908
+
10371
10909
  light_key = f"{filter_name} - {exposure_text} ({image_size})"
10372
10910
  composite_key = (light_key, session_tag)
10373
10911
  self.light_files.setdefault(composite_key, []).append(path)
10374
10912
  self.session_tags[path] = session_tag
10375
10913
 
10376
- # Cached filter item
10377
10914
  filter_item = self._light_filter_item.get(filter_name)
10378
10915
  if filter_item is None:
10379
10916
  filter_item = QTreeWidgetItem([filter_name])
@@ -10383,7 +10920,6 @@ class StackingSuiteDialog(QDialog):
10383
10920
  want_label = f"{exposure_text} ({image_size})"
10384
10921
  exp_key = (filter_name, want_label)
10385
10922
 
10386
- # Cached exposure item
10387
10923
  exposure_item = self._light_exp_item.get(exp_key)
10388
10924
  if exposure_item is None:
10389
10925
  exposure_item = QTreeWidgetItem([want_label])
@@ -10391,7 +10927,7 @@ class StackingSuiteDialog(QDialog):
10391
10927
  self._light_exp_item[exp_key] = exposure_item
10392
10928
 
10393
10929
  leaf = QTreeWidgetItem([os.path.basename(path), meta_text])
10394
- leaf.setData(0, Qt.ItemDataRole.UserRole, path) # ✅ keep this
10930
+ leaf.setData(0, Qt.ItemDataRole.UserRole, path)
10395
10931
  leaf.setData(0, Qt.ItemDataRole.UserRole + 1, session_tag)
10396
10932
  exposure_item.addChild(leaf)
10397
10933
 
@@ -10411,7 +10947,7 @@ class StackingSuiteDialog(QDialog):
10411
10947
  for file_path in files:
10412
10948
  try:
10413
10949
  # Read only the FITS header (fast)
10414
- header = fits.getheader(file_path)
10950
+ header, _kind = get_valid_header(file_path)
10415
10951
 
10416
10952
  # Check for both EXPOSURE and EXPTIME
10417
10953
  exposure = header.get("EXPOSURE", header.get("EXPTIME", "Unknown"))
@@ -10427,7 +10963,13 @@ class StackingSuiteDialog(QDialog):
10427
10963
 
10428
10964
  # Construct key based on file type
10429
10965
  if file_type.upper() == "DARK":
10430
- key = f"{exposure}s ({image_size})"
10966
+ try:
10967
+ exposure_f = float(exposure)
10968
+ exposure_text = f"{exposure_f:g}s"
10969
+ except Exception:
10970
+ exposure_text = f"{exposure}s" if str(exposure).endswith("s") else str(exposure)
10971
+
10972
+ key = f"{exposure_text} ({image_size})"
10431
10973
  self.master_files[key] = file_path # Store master dark
10432
10974
  self.master_sizes[file_path] = image_size # Store size
10433
10975
  elif file_type.upper() == "FLAT":
@@ -10489,14 +11031,39 @@ class StackingSuiteDialog(QDialog):
10489
11031
  exposure_tolerance = self.exposure_tolerance_spinbox.value()
10490
11032
 
10491
11033
  # -------------------------------------------------------------------------
10492
- # Group darks by (exposure +/- tolerance, image size string, session)
10493
- # self.dark_files can be either:
10494
- # legacy: exposure_key -> [paths]
10495
- # session: (exposure_key, session) -> [paths]
11034
+ # Temp helpers
10496
11035
  # -------------------------------------------------------------------------
10497
- dark_files_by_group: dict[tuple[float, str, str], list[str]] = {} # (exp, size, session)->list
11036
+ def _bucket_temp(t: float | None, step: float = 3.0) -> float | None:
11037
+ """Round temperature to a stable bucket (e.g. -10.2 -> -10.0 if step=1.0)."""
11038
+ if t is None:
11039
+ return None
11040
+ try:
11041
+ return round(float(t) / step) * step
11042
+ except Exception:
11043
+ return None
11044
+
11045
+ def _read_temp_quick(path: str) -> tuple[float | None, float | None, float | None]:
11046
+ """Fast temp read (CCD, SET, chosen). Uses fits.getheader(memmap=True)."""
11047
+ try:
11048
+ hdr = fits.getheader(path, memmap=True)
11049
+ except Exception:
11050
+ return None, None, None
11051
+ ccd = _get_key_float(hdr, "CCD-TEMP")
11052
+ st = _get_key_float(hdr, "SET-TEMP")
11053
+ chosen = ccd if ccd is not None else st
11054
+ return ccd, st, chosen
11055
+
11056
+ # -------------------------------------------------------------------------
11057
+ # Group darks by (exposure +/- tolerance, image size, session, temp_bucket)
11058
+ # TEMP_STEP is the rounding bucket (1.0C default)
11059
+ # -------------------------------------------------------------------------
11060
+ TEMP_STEP = self.settings.value("stacking/temp_group_step", 1.0, type=float)
11061
+
11062
+ dark_files_by_group: dict[tuple[float, str, str, float | None], list[str]] = {} # (exp,size,session,temp)->list
10498
11063
 
10499
11064
  for key, file_list in (self.dark_files or {}).items():
11065
+ # Support both legacy dark_files (key=str) and newer tuple keys.
11066
+ # We DO NOT assume dark_files already contains temp in key β€” we re-bucket from headers anyway.
10500
11067
  if isinstance(key, tuple) and len(key) >= 2:
10501
11068
  exposure_key = str(key[0])
10502
11069
  session = str(key[1]) if str(key[1]).strip() else "Default"
@@ -10508,10 +11075,9 @@ class StackingSuiteDialog(QDialog):
10508
11075
  exposure_time_str, image_size = exposure_key.split(" (", 1)
10509
11076
  image_size = image_size.rstrip(")")
10510
11077
  except ValueError:
10511
- # If some malformed key got in, skip safely
10512
11078
  continue
10513
11079
 
10514
- if "Unknown" in exposure_time_str:
11080
+ if "Unknown" in (exposure_time_str or ""):
10515
11081
  exposure_time = 0.0
10516
11082
  else:
10517
11083
  try:
@@ -10519,21 +11085,31 @@ class StackingSuiteDialog(QDialog):
10519
11085
  except Exception:
10520
11086
  exposure_time = 0.0
10521
11087
 
10522
- matched_group = None
10523
- for (existing_exposure, existing_size, existing_session) in list(dark_files_by_group.keys()):
10524
- if (
10525
- existing_session == session
10526
- and existing_size == image_size
10527
- and abs(existing_exposure - exposure_time) <= exposure_tolerance
10528
- ):
10529
- matched_group = (existing_exposure, existing_size, existing_session)
10530
- break
11088
+ # Split the incoming list by temp bucket so mixed temps do not merge.
11089
+ bucketed: dict[float | None, list[str]] = {}
11090
+ for p in (file_list or []):
11091
+ _, _, chosen = _read_temp_quick(p)
11092
+ tb = _bucket_temp(chosen, step=TEMP_STEP)
11093
+ bucketed.setdefault(tb, []).append(p)
11094
+
11095
+ # Apply exposure tolerance grouping PER temp bucket
11096
+ for temp_bucket, paths_in_bucket in bucketed.items():
11097
+ matched_group = None
11098
+ for (existing_exposure, existing_size, existing_session, existing_temp) in list(dark_files_by_group.keys()):
11099
+ if (
11100
+ existing_session == session
11101
+ and existing_size == image_size
11102
+ and existing_temp == temp_bucket
11103
+ and abs(existing_exposure - exposure_time) <= exposure_tolerance
11104
+ ):
11105
+ matched_group = (existing_exposure, existing_size, existing_session, existing_temp)
11106
+ break
10531
11107
 
10532
- if matched_group is None:
10533
- matched_group = (exposure_time, image_size, session)
10534
- dark_files_by_group[matched_group] = []
11108
+ if matched_group is None:
11109
+ matched_group = (exposure_time, image_size, session, temp_bucket)
11110
+ dark_files_by_group[matched_group] = []
10535
11111
 
10536
- dark_files_by_group[matched_group].extend(file_list or [])
11112
+ dark_files_by_group[matched_group].extend(paths_in_bucket)
10537
11113
 
10538
11114
  master_dir = os.path.join(self.stacking_directory, "Master_Calibration_Files")
10539
11115
  os.makedirs(master_dir, exist_ok=True)
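The net effect of the regrouping above is a four-part master-dark key: frames are only stacked together when exposure (within tolerance), frame size, session and temperature bucket all agree. A toy sketch of the key construction follows; file names, size and temperatures are made up, and the exposure-tolerance matching is omitted for brevity.

# Toy frames: (path, exposure_s, size, session, ccd_temp)
frames = [
    ("d1.fits", 300.0, "6248x4176", "Default", -10.2),
    ("d2.fits", 300.0, "6248x4176", "Default", -9.9),
    ("d3.fits", 300.0, "6248x4176", "Default", -5.1),   # different temp bucket
]

TEMP_STEP = 1.0
groups: dict[tuple, list[str]] = {}
for path, exp, size, session, temp in frames:
    bucket = round(temp / TEMP_STEP) * TEMP_STEP
    groups.setdefault((exp, size, session, bucket), []).append(path)

for key, paths in groups.items():
    print(key, "->", paths)
# (300.0, '6248x4176', 'Default', -10.0) -> ['d1.fits', 'd2.fits']
# (300.0, '6248x4176', 'Default', -5.0)  -> ['d3.fits']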
@@ -10542,11 +11118,11 @@ class StackingSuiteDialog(QDialog):
10542
11118
  # Informative status about discovery
10543
11119
  # -------------------------------------------------------------------------
10544
11120
  try:
10545
- n_groups = sum(1 for _, v in dark_files_by_group.items() if len(v) >= 2)
11121
+ n_groups_eligible = sum(1 for _, v in dark_files_by_group.items() if len(v) >= 2)
10546
11122
  total_files = sum(len(v) for v in dark_files_by_group.values())
10547
11123
  self.update_status(self.tr(
10548
11124
  f"πŸ”Ž Discovered {len(dark_files_by_group)} grouped exposures "
10549
- f"({n_groups} eligible to stack) β€” {total_files} files total."
11125
+ f"({n_groups_eligible} eligible to stack) β€” {total_files} files total."
10550
11126
  ))
10551
11127
  except Exception:
10552
11128
  pass
@@ -10556,12 +11132,12 @@ class StackingSuiteDialog(QDialog):
10556
11132
  # Pre-count tiles for progress bar (per-group safe chunk sizes)
10557
11133
  # -------------------------------------------------------------------------
10558
11134
  total_tiles = 0
10559
- group_shapes: dict[tuple[float, str, str], tuple[int, int, int, int, int]] = {} # (exp,size,session)->(H,W,C,ch,cw)
11135
+ group_shapes: dict[tuple[float, str, str, float | None], tuple[int, int, int, int, int]] = {}
10560
11136
  pref_chunk_h = self.chunk_height
10561
11137
  pref_chunk_w = self.chunk_width
10562
11138
  DTYPE = np.float32
10563
11139
 
10564
- for (exposure_time, image_size, session), file_list in dark_files_by_group.items():
11140
+ for (exposure_time, image_size, session, temp_bucket), file_list in dark_files_by_group.items():
10565
11141
  if len(file_list) < 2:
10566
11142
  continue
10567
11143
 
@@ -10579,7 +11155,8 @@ class StackingSuiteDialog(QDialog):
10579
11155
  except MemoryError:
10580
11156
  chunk_h, chunk_w = pref_chunk_h, pref_chunk_w
10581
11157
 
10582
- group_shapes[(exposure_time, image_size, session)] = (H, W, C, chunk_h, chunk_w)
11158
+ gk = (exposure_time, image_size, session, temp_bucket)
11159
+ group_shapes[gk] = (H, W, C, chunk_h, chunk_w)
10583
11160
  total_tiles += _count_tiles(H, W, chunk_h, chunk_w)
10584
11161
 
10585
11162
  if total_tiles == 0:
@@ -10592,7 +11169,7 @@ class StackingSuiteDialog(QDialog):
10592
11169
  QApplication.processEvents()
10593
11170
 
10594
11171
  # -------------------------------------------------------------------------
10595
- # Local CPU reducers (unchanged)
11172
+ # Local CPU reducers
10596
11173
  # -------------------------------------------------------------------------
10597
11174
  def _select_reducer(kind: str, N: int):
10598
11175
  if kind == "dark":
@@ -10636,10 +11213,10 @@ class StackingSuiteDialog(QDialog):
10636
11213
  # ---------------------------------------------------------------------
10637
11214
  # Per-group stacking loop
10638
11215
  # ---------------------------------------------------------------------
10639
- for (exposure_time, image_size, session), file_list in dark_files_by_group.items():
11216
+ for (exposure_time, image_size, session, temp_bucket), file_list in dark_files_by_group.items():
10640
11217
  if len(file_list) < 2:
10641
11218
  self.update_status(self.tr(
10642
- f"⚠️ Skipping {exposure_time}s ({image_size}) [{session}] - Not enough frames to stack."
11219
+ f"⚠️ Skipping {exposure_time:g}s ({image_size}) [{session}] - Not enough frames to stack."
10643
11220
  ))
10644
11221
  QApplication.processEvents()
10645
11222
  continue
@@ -10648,14 +11225,17 @@ class StackingSuiteDialog(QDialog):
10648
11225
  self.update_status(self.tr("β›” Master Dark creation cancelled."))
10649
11226
  break
10650
11227
 
11228
+ temp_txt = "Unknown" if temp_bucket is None else f"{float(temp_bucket):+.1f}C"
10651
11229
  self.update_status(self.tr(
10652
- f"🟒 Processing {len(file_list)} darks for {exposure_time}s ({image_size}) in session '{session}'…"
11230
+ f"🟒 Processing {len(file_list)} darks for {exposure_time:g}s ({image_size}) "
11231
+ f"in session '{session}' at {temp_txt}…"
10653
11232
  ))
10654
11233
  QApplication.processEvents()
10655
11234
 
10656
11235
  # --- reference shape and per-group chunk size ---
10657
- if (exposure_time, image_size, session) in group_shapes:
10658
- height, width, channels, chunk_height, chunk_width = group_shapes[(exposure_time, image_size, session)]
11236
+ gk = (exposure_time, image_size, session, temp_bucket)
11237
+ if gk in group_shapes:
11238
+ height, width, channels, chunk_height, chunk_width = group_shapes[gk]
10659
11239
  else:
10660
11240
  ref_data, _, _, _ = load_image(file_list[0])
10661
11241
  if ref_data is None:
@@ -10695,8 +11275,11 @@ class StackingSuiteDialog(QDialog):
10695
11275
  QApplication.processEvents()
10696
11276
  continue
10697
11277
 
10698
- # Include session to prevent collisions
10699
- memmap_path = os.path.join(master_dir, f"temp_dark_{session}_{exposure_time}_{image_size}.dat")
11278
+ # Create temp memmap (stem-safe normalization)
11279
+ tb_tag = "notemp" if temp_bucket is None else _temp_to_stem_tag(float(temp_bucket))
11280
+ memmap_base = f"temp_dark_{session}_{exposure_time:g}s_{image_size}_{tb_tag}.dat"
11281
+ memmap_base = self._normalize_master_stem(memmap_base)
11282
+ memmap_path = os.path.join(master_dir, memmap_base)
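`_temp_to_stem_tag` is referenced here but not shown in this hunk; a hedged sketch of the assumed behavior, consistent with the `(set)?([mp])(\d+)p(\d)C` token that `_parse_masterdark_name` decodes further below:

def _temp_to_stem_tag_sketch(temp_c: float, prefix: str = "") -> str:
    # -10.0 -> "m10p0C"; +5.0 with prefix="set" -> "setp5p0C"
    sign = "m" if temp_c < 0 else "p"
    tenths = int(round(abs(temp_c) * 10))      # keep one decimal of precision
    return f"{prefix}{sign}{tenths // 10}p{tenths % 10}C"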
10700
11283
 
10701
11284
  self.update_status(self.tr(
10702
11285
  f"πŸ—‚οΈ Creating temp memmap: {os.path.basename(memmap_path)} "
@@ -10708,6 +11291,7 @@ class StackingSuiteDialog(QDialog):
10708
11291
 
10709
11292
  tiles = _tile_grid(height, width, chunk_height, chunk_width)
10710
11293
  total_tiles_group = len(tiles)
11294
+
10711
11295
  self.update_status(self.tr(
10712
11296
  f"πŸ“¦ {total_tiles_group} tiles to process for this group (chunk {chunk_height}Γ—{chunk_width})."
10713
11297
  ))
@@ -10749,7 +11333,7 @@ class StackingSuiteDialog(QDialog):
10749
11333
  fut = tp.submit(_read_tile_into, (buf1 if use0 else buf0), ny0, ny1, nx0, nx1)
10750
11334
 
10751
11335
  pd.set_label(
10752
- f"{int(exposure_time)}s ({image_size}) [{session}] β€” "
11336
+ f"{int(exposure_time)}s ({image_size}) [{session}] [{temp_txt}] β€” "
10753
11337
  f"tile {t_idx}/{total_tiles_group} y:{y0}-{y1} x:{x0}-{x1}"
10754
11338
  )
10755
11339
 
@@ -10779,6 +11363,7 @@ class StackingSuiteDialog(QDialog):
10779
11363
 
10780
11364
  if tile_result.ndim == 2:
10781
11365
  tile_result = tile_result[:, :, None]
11366
+
10782
11367
  expected_shape = (th, tw, channels)
10783
11368
  if tile_result.shape != expected_shape:
10784
11369
  if tile_result.shape[:2] == (th, tw):
@@ -10813,37 +11398,115 @@ class StackingSuiteDialog(QDialog):
10813
11398
  pass
10814
11399
  break
10815
11400
 
11401
+ # -------------------------------------------------------------
11402
+ # Materialize final memmap to ndarray for save
11403
+ # -------------------------------------------------------------
10816
11404
  master_dark_data = np.asarray(final_stacked, dtype=np.float32)
10817
- del final_stacked
11405
+ try:
11406
+ del final_stacked
11407
+ except Exception:
11408
+ pass
10818
11409
  gc.collect()
11410
+
10819
11411
  try:
10820
11412
  os.remove(memmap_path)
10821
11413
  except Exception:
10822
11414
  pass
10823
11415
 
10824
- # Include session in output name
10825
- master_dark_stem = f"MasterDark_{session}_{int(exposure_time)}s_{image_size}"
11416
+ # -------------------------------------------------------------
11417
+ # Collect temperature stats from input dark headers
11418
+ # -------------------------------------------------------------
11419
+ temp_info = {}
11420
+ try:
11421
+ temp_info = _collect_temp_stats(file_list) or {}
11422
+ except Exception:
11423
+ temp_info = {}
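`_collect_temp_stats` is assumed to summarize CCD-TEMP / SET-TEMP across the input darks and return the keys consumed below (ccd_med, ccd_min, ..., set_n); a sketch of that shape under those assumptions:

import numpy as np
from astropy.io import fits

def _collect_temp_stats_sketch(paths: list[str]) -> dict:
    ccd, setp = [], []
    for p in paths:
        try:
            hdr = fits.getheader(p, 0)
        except Exception:
            continue
        if hdr.get("CCD-TEMP") is not None:
            ccd.append(float(hdr["CCD-TEMP"]))
        if hdr.get("SET-TEMP") is not None:
            setp.append(float(hdr["SET-TEMP"]))
    out = {}
    if ccd:
        a = np.asarray(ccd, dtype=float)
        out.update(ccd_med=float(np.median(a)), ccd_min=float(a.min()),
                   ccd_max=float(a.max()), ccd_std=float(a.std()), ccd_n=len(ccd))
    if setp:
        a = np.asarray(setp, dtype=float)
        out.update(set_med=float(np.median(a)), set_min=float(a.min()),
                   set_max=float(a.max()), set_std=float(a.std()), set_n=len(setp))
    return out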
11424
+
11425
+ # -------------------------------------------------------------
11426
+ # Build output filename (include session + exposure + size + temp bucket tag)
11427
+ # -------------------------------------------------------------
11428
+ temp_tag = ""
11429
+ try:
11430
+ if temp_bucket is not None:
11431
+ temp_tag = "_" + _temp_to_stem_tag(float(temp_bucket))
11432
+ elif temp_info.get("ccd_med") is not None:
11433
+ temp_tag = "_" + _temp_to_stem_tag(float(temp_info["ccd_med"]))
11434
+ elif temp_info.get("set_med") is not None:
11435
+ temp_tag = "_" + _temp_to_stem_tag(float(temp_info["set_med"]), prefix="set")
11436
+ except Exception:
11437
+ temp_tag = ""
11438
+
11439
+ master_dark_stem = f"MasterDark_{session}_{int(exposure_time)}s_{image_size}{temp_tag}"
11440
+ master_dark_stem = self._normalize_master_stem(master_dark_stem)
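`_normalize_master_stem` is not shown in this diff; the parser note further down ("exp might be \"2_5\" from _normalize_master_stem; convert back") implies it at least maps "." to "_" to keep stems filesystem-safe. A minimal sketch under that assumption:

import re

def _normalize_master_stem_sketch(stem: str) -> str:
    # "MasterDark_Sess 1_2.5s_4144x2822_m10p0C" -> "MasterDark_Sess_1_2_5s_4144x2822_m10p0C"
    stem = stem.replace(".", "_").replace(" ", "_")
    return re.sub(r"[^A-Za-z0-9_\-]", "_", stem)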
10826
11441
  master_dark_path = self._build_out(master_dir, master_dark_stem, "fit")
10827
11442
 
11443
+ # -------------------------------------------------------------
11444
+ # Header
11445
+ # -------------------------------------------------------------
10828
11446
  master_header = fits.Header()
10829
11447
  master_header["IMAGETYP"] = "DARK"
10830
- master_header["EXPTIME"] = (exposure_time, "User-specified or from grouping")
10831
- master_header["SESSION"] = (session, "User session tag") # optional but useful
10832
- master_header["NAXIS"] = 3 if channels == 3 else 2
10833
- master_header["NAXIS1"] = master_dark_data.shape[1]
10834
- master_header["NAXIS2"] = master_dark_data.shape[0]
11448
+ master_header["EXPTIME"] = (float(exposure_time), "Exposure time (s)")
11449
+ master_header["SESSION"] = (str(session), "User session tag")
11450
+ master_header["NCOMBINE"] = (int(N), "Number of darks combined")
11451
+ master_header["NSTACK"] = (int(N), "Alias of NCOMBINE (SetiAstro)")
11452
+
11453
+ # Temperature provenance (only write keys that exist)
11454
+ if temp_info.get("ccd_med") is not None:
11455
+ master_header["CCD-TEMP"] = (float(temp_info["ccd_med"]), "Median CCD temp of input darks (C)")
11456
+ if temp_info.get("ccd_min") is not None:
11457
+ master_header["CCDTMIN"] = (float(temp_info["ccd_min"]), "Min CCD temp in input darks (C)")
11458
+ if temp_info.get("ccd_max") is not None:
11459
+ master_header["CCDTMAX"] = (float(temp_info["ccd_max"]), "Max CCD temp in input darks (C)")
11460
+ if temp_info.get("ccd_std") is not None:
11461
+ master_header["CCDTSTD"] = (float(temp_info["ccd_std"]), "Std CCD temp in input darks (C)")
11462
+ if temp_info.get("ccd_n") is not None:
11463
+ master_header["CCDTN"] = (int(temp_info["ccd_n"]), "Count of frames with CCD-TEMP")
11464
+
11465
+ if temp_info.get("set_med") is not None:
11466
+ master_header["SET-TEMP"] = (float(temp_info["set_med"]), "Median setpoint temp of input darks (C)")
11467
+ if temp_info.get("set_min") is not None:
11468
+ master_header["SETTMIN"] = (float(temp_info["set_min"]), "Min setpoint in input darks (C)")
11469
+ if temp_info.get("set_max") is not None:
11470
+ master_header["SETTMAX"] = (float(temp_info["set_max"]), "Max setpoint in input darks (C)")
11471
+ if temp_info.get("set_std") is not None:
11472
+ master_header["SETTSTD"] = (float(temp_info["set_std"]), "Std setpoint in input darks (C)")
11473
+ if temp_info.get("set_n") is not None:
11474
+ master_header["SETTN"] = (int(temp_info["set_n"]), "Count of frames with SET-TEMP")
11475
+
11476
+ # Dimensions (save_image usually writes these, but keep your existing behavior)
11477
+ master_header["NAXIS"] = 3 if channels == 3 else 2
11478
+ master_header["NAXIS1"] = int(master_dark_data.shape[1])
11479
+ master_header["NAXIS2"] = int(master_dark_data.shape[0])
10835
11480
  if channels == 3:
10836
11481
  master_header["NAXIS3"] = 3
10837
11482
 
10838
- save_image(master_dark_data, master_dark_path, "fit", "32-bit floating point", master_header, is_mono=(channels == 1))
11483
+ save_image(
11484
+ master_dark_data,
11485
+ master_dark_path,
11486
+ "fit",
11487
+ "32-bit floating point",
11488
+ master_header,
11489
+ is_mono=(channels == 1)
11490
+ )
11491
+
11492
+ # Tree label includes temp for visibility
11493
+ tree_label = f"{exposure_time:g}s ({image_size}) [{session}]"
11494
+ if temp_info.get("ccd_med") is not None:
11495
+ tree_label += f" [CCD {float(temp_info['ccd_med']):+.1f}C]"
11496
+ elif temp_info.get("set_med") is not None:
11497
+ tree_label += f" [SET {float(temp_info['set_med']):+.1f}C]"
11498
+ elif temp_bucket is not None:
11499
+ tree_label += f" [TEMP {float(temp_bucket):+.1f}C]"
10839
11500
 
10840
- self.add_master_dark_to_tree(f"{exposure_time}s ({image_size}) [{session}]", master_dark_path)
11501
+ self.add_master_dark_to_tree(tree_label, master_dark_path)
10841
11502
  self.update_status(self.tr(f"βœ… Master Dark saved: {master_dark_path}"))
10842
11503
  QApplication.processEvents()
10843
11504
 
11505
+ # Refresh assignments + persistence
10844
11506
  self.assign_best_master_files()
10845
11507
  self.save_master_paths_to_settings()
10846
11508
 
11509
+ # Post-pass refresh (unchanged behavior)
10847
11510
  self.assign_best_master_dark()
10848
11511
  self.update_override_dark_combo()
10849
11512
  self.assign_best_master_files()
@@ -10856,7 +11519,6 @@ class StackingSuiteDialog(QDialog):
10856
11519
  logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
10857
11520
  pd.close()
10858
11521
 
10859
-
10860
11522
  def add_master_dark_to_tree(self, exposure_label: str, master_dark_path: str):
10861
11523
  """
10862
11524
  Adds the newly created Master Dark to the Master Dark TreeBox and updates the dropdown.
@@ -11256,22 +11918,17 @@ class StackingSuiteDialog(QDialog):
11256
11918
  dark_data: np.ndarray | None,
11257
11919
  pattern: str,
11258
11920
  ):
11259
- """
11260
- Returns scales shape (N,4): [R, G1, G2, B] where scale = frame_plane_median / group_plane_median.
11261
- """
11262
11921
  pat = (pattern or "RGGB").strip().upper()
11263
11922
  if pat not in ("RGGB", "BGGR", "GRBG", "GBRG"):
11264
11923
  pat = "RGGB"
11265
11924
 
11266
- # Central patch
11267
11925
  th = min(512, H); tw = min(512, W)
11268
11926
  y0 = (H - th) // 2; y1 = y0 + th
11269
11927
  x0 = (W - tw) // 2; x1 = x0 + tw
11270
11928
 
11271
11929
  N = len(file_list)
11272
- meds = np.empty((N, 4), dtype=np.float64) # R,G1,G2,B
11930
+ meds = np.empty((N, 4), dtype=np.float64)
11273
11931
 
11274
- # parity β†’ plane label
11275
11932
  if pat == "RGGB":
11276
11933
  m = {(0,0):"R", (0,1):"G1", (1,0):"G2", (1,1):"B"}
11277
11934
  elif pat == "BGGR":
@@ -11288,9 +11945,24 @@ class StackingSuiteDialog(QDialog):
11288
11945
  d = float(np.median(v))
11289
11946
  return d if np.isfinite(d) and d > 0 else 1.0
11290
11947
 
11948
+ # Normalize the dark/bias subtractor to a single 2D plane for Bayer mosaics (important for XISF HWC darks)
11949
+ dd2 = None
11950
+ if dark_data is not None:
11951
+ dd2 = dark_data
11952
+ if dd2.ndim == 3:
11953
+ # CHW -> HWC
11954
+ if dd2.shape[0] in (1, 3):
11955
+ dd2 = dd2.transpose(1, 2, 0)
11956
+ # HWC -> take first plane for mosaic subtraction
11957
+ dd2 = dd2[:, :, 0]
11958
+ dd2 = dd2.astype(np.float32, copy=False)
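For orientation, the parity-to-plane map above (and the `planes = {...}` split that follows) boils down to strided slicing of the 2D mosaic; an RGGB illustration (the other patterns only permute the labels):

import numpy as np

patch = np.arange(16, dtype=np.float32).reshape(4, 4)   # toy 2D mosaic
planes = {
    "R":  patch[0::2, 0::2],   # (row, col) parity (0, 0)
    "G1": patch[0::2, 1::2],   # parity (0, 1)
    "G2": patch[1::2, 0::2],   # parity (1, 0)
    "B":  patch[1::2, 1::2],   # parity (1, 1)
}
per_plane_medians = {k: float(np.median(v)) for k, v in planes.items()}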
11959
+
11291
11960
  from concurrent.futures import ThreadPoolExecutor, as_completed
11292
11961
  with ThreadPoolExecutor(max_workers=min(os.cpu_count() or 4, 8)) as exe:
11293
- fut2i = {exe.submit(load_fits_tile, fp, y0, y1, x0, x1): i for i, fp in enumerate(file_list)}
11962
+ fut2i = {
11963
+ exe.submit(_read_center_patch_via_mmimage, fp, y0, y1, x0, x1): i
11964
+ for i, fp in enumerate(file_list)
11965
+ }
11294
11966
  for fut in as_completed(fut2i):
11295
11967
  i = fut2i[fut]
11296
11968
  sub = fut.result()
@@ -11299,16 +11971,14 @@ class StackingSuiteDialog(QDialog):
11299
11971
  continue
11300
11972
 
11301
11973
  # Ensure 2D mosaic
11302
- if sub.ndim == 3 and sub.shape[0] in (1, 3):
11303
- sub = sub[0] if sub.shape[0] == 1 else sub.transpose(1, 2, 0)[:, :, 0]
11974
+ if sub.ndim == 3:
11975
+ if sub.shape[0] in (1, 3): # CHW
11976
+ sub = sub.transpose(1, 2, 0)
11977
+ sub = sub[:, :, 0] # first plane
11304
11978
  sub = sub.astype(np.float32, copy=False)
11305
11979
 
11306
- # Dark subtract patch if present
11307
- if dark_data is not None:
11308
- dd = dark_data
11309
- if dd.ndim == 3 and dd.shape[0] in (1, 3):
11310
- dd = dd.transpose(1, 2, 0)[:, :, 0]
11311
- d_tile = dd[y0:y1, x0:x1].astype(np.float32, copy=False)
11980
+ if dd2 is not None:
11981
+ d_tile = dd2[y0:y1, x0:x1].astype(np.float32, copy=False)
11312
11982
  sub = sub - d_tile
11313
11983
 
11314
11984
  planes = {
@@ -11327,15 +11997,11 @@ class StackingSuiteDialog(QDialog):
11327
11997
  gmed = np.median(meds, axis=0)
11328
11998
  gmed = np.where(np.isfinite(gmed) & (gmed > 0), gmed, 1.0)
11329
11999
 
11330
- scales = meds / gmed # (N,4)
11331
- scales = np.clip(scales, 1e-3, 1e3).astype(np.float32)
11332
- return scales
12000
+ scales = meds / gmed
12001
+ return np.clip(scales, 1e-3, 1e3).astype(np.float32)
12002
+
11333
12003
 
11334
12004
  def _estimate_flat_scales(file_list: list[str], H: int, W: int, C: int, dark_data: np.ndarray | None):
11335
- """
11336
- Read one central patch (min(512, H/W)) from each frame, subtract dark (if present),
11337
- compute per-frame median, and normalize scales to overall median.
11338
- """
11339
12005
  th = min(512, H); tw = min(512, W)
11340
12006
  y0 = (H - th) // 2; y1 = y0 + th
11341
12007
  x0 = (W - tw) // 2; x1 = x0 + tw
@@ -11343,9 +12009,20 @@ class StackingSuiteDialog(QDialog):
11343
12009
  N = len(file_list)
11344
12010
  meds = np.empty((N,), dtype=np.float64)
11345
12011
 
12012
+ # Normalize subtractor to HWC or 2D
12013
+ dd = None
12014
+ if dark_data is not None:
12015
+ dd = dark_data
12016
+ if dd.ndim == 3 and dd.shape[0] in (1, 3): # CHW -> HWC
12017
+ dd = dd.transpose(1, 2, 0)
12018
+ dd = dd.astype(np.float32, copy=False)
12019
+
11346
12020
  from concurrent.futures import ThreadPoolExecutor, as_completed
11347
12021
  with ThreadPoolExecutor(max_workers=min(os.cpu_count() or 4, 8)) as exe:
11348
- fut2i = {exe.submit(load_fits_tile, fp, y0, y1, x0, x1): i for i, fp in enumerate(file_list)}
12022
+ fut2i = {
12023
+ exe.submit(_read_center_patch_via_mmimage, fp, y0, y1, x0, x1): i
12024
+ for i, fp in enumerate(file_list)
12025
+ }
11349
12026
  for fut in as_completed(fut2i):
11350
12027
  i = fut2i[fut]
11351
12028
  sub = fut.result()
@@ -11360,22 +12037,22 @@ class StackingSuiteDialog(QDialog):
11360
12037
  sub = sub.transpose(1, 2, 0)
11361
12038
  sub = sub.astype(np.float32, copy=False)
11362
12039
 
11363
- if dark_data is not None:
11364
- dd = dark_data
11365
- if dd.ndim == 3 and dd.shape[0] in (1, 3):
11366
- dd = dd.transpose(1, 2, 0)
11367
- d_tile = dd[y0:y1, x0:x1].astype(np.float32, copy=False)
12040
+ if dd is not None:
12041
+ d_tile = dd[y0:y1, x0:x1]
11368
12042
  if d_tile.ndim == 2 and sub.shape[2] == 3:
11369
12043
  d_tile = np.repeat(d_tile[..., None], 3, axis=2)
11370
- sub = sub - d_tile
12044
+ elif d_tile.ndim == 3 and sub.shape[2] == 1:
12045
+ d_tile = d_tile[:, :, :1]
12046
+ sub = sub - d_tile.astype(np.float32, copy=False)
11371
12047
 
11372
- meds[i] = np.median(sub, axis=(0, 1, 2))
12048
+ meds[i] = float(np.median(sub))
11373
12049
 
11374
- gmed = np.median(meds) if np.all(np.isfinite(meds)) else 1.0
11375
- gmed = 1.0 if gmed == 0.0 else gmed
12050
+ gmed = float(np.median(meds)) if np.all(np.isfinite(meds)) else 1.0
12051
+ if not np.isfinite(gmed) or gmed == 0.0:
12052
+ gmed = 1.0
11376
12053
  scales = meds / gmed
11377
- scales = np.clip(scales, 1e-3, 1e3).astype(np.float32)
11378
- return scales
12054
+ return np.clip(scales, 1e-3, 1e3).astype(np.float32)
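A quick numeric check of the scale normalization above:

import numpy as np

meds = np.array([0.010, 0.012, 0.008])        # per-frame central-patch medians
gmed = float(np.median(meds))                 # 0.010
scales = np.clip(meds / gmed, 1e-3, 1e3)      # -> approx. [1.0, 1.2, 0.8]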
12055
+
11379
12056
 
11380
12057
  def _apply_bayer_scales_stack_inplace(ts_np: np.ndarray, scales4: np.ndarray, pat: str, y0: int, x0: int):
11381
12058
  """
@@ -11889,6 +12566,140 @@ class StackingSuiteDialog(QDialog):
11889
12566
  master_item = QTreeWidgetItem([os.path.basename(master_flat_path)])
11890
12567
  filter_item.addChild(master_item)
11891
12568
 
12569
+ def _parse_float(self, v):
12570
+ try:
12571
+ if v is None:
12572
+ return None
12573
+ if isinstance(v, (int, float)):
12574
+ return float(v)
12575
+ s = str(v).strip()
12576
+ # handle " -10.0 C" or "-10.0C"
12577
+ s = s.replace("Β°", "").replace("C", "").replace("c", "").strip()
12578
+ return float(s)
12579
+ except Exception:
12580
+ return None
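The string handling above tolerates units and degree signs typically seen in camera headers, e.g.:

# self._parse_float(-10)        -> -10.0
# self._parse_float(" -10.0 C") -> -10.0
# self._parse_float("n/a")      -> None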
12581
+
12582
+
12583
+ def _read_ccd_set_temp_from_fits(self, path: str) -> tuple[float|None, float|None]:
12584
+ """Read CCD-TEMP and SET-TEMP from FITS header (primary HDU)."""
12585
+ try:
12586
+ with fits.open(path) as hdul:
12587
+ hdr = hdul[0].header
12588
+ ccd = self._parse_float(hdr.get("CCD-TEMP", None))
12589
+ st = self._parse_float(hdr.get("SET-TEMP", None))
12590
+ return ccd, st
12591
+ except Exception:
12592
+ return None, None
12593
+
12594
+
12595
+ def _temp_for_matching(self, ccd: float|None, st: float|None) -> float|None:
12596
+ """Prefer CCD-TEMP; else SET-TEMP; else None."""
12597
+ return ccd if ccd is not None else (st if st is not None else None)
12598
+
12599
+
12600
+ def _parse_masterdark_name(self, stem: str):
12601
+ """
12602
+ From filename like:
12603
+ MasterDark_Session_300s_4144x2822_m10p0C.fit
12604
+ Return dict fields; temp is optional.
12605
+ """
12606
+ out = {"session": None, "exp": None, "size": None, "temp": None}
12607
+
12608
+ base = os.path.basename(stem)
12609
+ base = os.path.splitext(base)[0]
12610
+
12611
+ # session is between MasterDark_ and _<exp>s_
12612
+ # exp is <num>s
12613
+ # size is <WxH> like 4144x2822
12614
+ m = re.match(r"^MasterDark_(?P<session>.+?)_(?P<exp>[\d._]+)s_(?P<size>\d+x\d+)(?:_(?P<temp>.*))?$", base)
12615
+ if not m:
12616
+ return out
12617
+
12618
+ out["session"] = (m.group("session") or "").strip()
12619
+ # exp might be "2_5" from _normalize_master_stem; convert back
12620
+ exp_txt = (m.group("exp") or "").replace("_", ".")
12621
+ try:
12622
+ out["exp"] = float(exp_txt)
12623
+ except Exception:
12624
+ out["exp"] = None
12625
+
12626
+ out["size"] = m.group("size")
12627
+
12628
+ # temp token like m10p0C / p5p0C / setm10p0C
12629
+ t = (m.group("temp") or "").strip()
12630
+ if t:
12631
+ # pick the first temp-ish token ending in C
12632
+ mt = re.search(r"(set)?([mp])(\d+)p(\d)C", t)
12633
+ if mt:
12634
+ sign = -1.0 if mt.group(2) == "m" else 1.0
12635
+ whole = float(mt.group(3))
12636
+ frac = float(mt.group(4)) / 10.0
12637
+ out["temp"] = sign * (whole + frac)
12638
+
12639
+ return out
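Round-trip example using the filename from the docstring above:

# self._parse_masterdark_name("MasterDark_Session_300s_4144x2822_m10p0C.fit")
# -> {"session": "Session", "exp": 300.0, "size": "4144x2822", "temp": -10.0}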
12640
+
12641
+
12642
+ def _get_master_dark_meta(self, path: str) -> dict:
12643
+ """
12644
+ Cached metadata for a master dark.
12645
+ Prefers FITS header for temp; falls back to filename temp token.
12646
+ """
12647
+ if not hasattr(self, "_master_dark_meta_cache"):
12648
+ self._master_dark_meta_cache = {}
12649
+ cache = self._master_dark_meta_cache
12650
+
12651
+ p = os.path.normpath(path)
12652
+ if p in cache:
12653
+ return cache[p]
12654
+
12655
+ meta = {"path": p, "session": None, "exp": None, "size": None,
12656
+ "ccd": None, "set": None, "temp": None}
12657
+
12658
+ # filename parse (fast)
12659
+ fn = self._parse_masterdark_name(p)
12660
+ meta["session"] = fn.get("session") or None
12661
+ meta["exp"] = fn.get("exp")
12662
+ meta["size"] = fn.get("size")
12663
+ meta["temp"] = fn.get("temp")
12664
+
12665
+ # header parse (authoritative for temps)
12666
+ ccd, st = self._read_ccd_set_temp_from_fits(p)
12667
+ meta["ccd"] = ccd
12668
+ meta["set"] = st
12669
+ meta["temp"] = self._temp_for_matching(ccd, st) if (ccd is not None or st is not None) else meta["temp"]
12670
+
12671
+ # size from header if missing
12672
+ if not meta["size"]:
12673
+ try:
12674
+ with fits.open(p) as hdul:
12675
+ data = hdul[0].data
12676
+ if data is not None:
12677
+ meta["size"] = f"{data.shape[1]}x{data.shape[0]}"
12678
+ except Exception:
12679
+ pass
12680
+
12681
+ cache[p] = meta
12682
+ return meta
12683
+
12684
+
12685
+ def _get_light_temp(self, light_path: str) -> tuple[float|None, float|None, float|None]:
12686
+ """Return (ccd, set, chosen) with caching."""
12687
+ if not hasattr(self, "_light_temp_cache"):
12688
+ self._light_temp_cache = {}
12689
+ cache = self._light_temp_cache
12690
+
12691
+ p = os.path.normpath(light_path or "")
12692
+ if not p:
12693
+ return None, None, None
12694
+ if p in cache:
12695
+ return cache[p]
12696
+
12697
+ ccd, st = self._read_ccd_set_temp_from_fits(p)
12698
+ chosen = self._temp_for_matching(ccd, st)
12699
+ cache[p] = (ccd, st, chosen)
12700
+ return cache[p]
12701
+
12702
+
11892
12703
  def assign_best_master_files(self, fill_only: bool = True):
11893
12704
  """
11894
12705
  Assign best matching Master Dark and Flat to each Light leaf.
@@ -11948,32 +12759,57 @@ class StackingSuiteDialog(QDialog):
11948
12759
  if fill_only and curr_dark and curr_dark.lower() != "none":
11949
12760
  dark_choice = curr_dark
11950
12761
  else:
11951
- # 3) Auto-pick by size+closest exposure
11952
- best_dark_match = None
11953
- best_dark_diff = float("inf")
11954
- for master_key, master_path in self.master_files.items():
11955
- dmatch = re.match(r"^([\d.]+)s\b", master_key) # darks start with "<exp>s"
11956
- if not dmatch:
12762
+ # 3) Auto-pick by size + closest exposure + closest temperature (and prefer same session)
12763
+ light_path = leaf_item.data(0, Qt.ItemDataRole.UserRole)
12764
+ l_ccd, l_set, l_temp = self._get_light_temp(light_path)
12765
+
12766
+ best_path = None
12767
+ best_score = None
12768
+
12769
+ for mk, mp in (self.master_files or {}).items():
12770
+ if not mp:
11957
12771
  continue
11958
- master_dark_exposure_time = float(dmatch.group(1))
11959
12772
 
11960
- # Ensure size known/cached
11961
- md_size = master_sizes.get(master_path)
11962
- if not md_size:
11963
- try:
11964
- with fits.open(master_path) as hdul:
11965
- md_size = f"{hdul[0].data.shape[1]}x{hdul[0].data.shape[0]}"
11966
- except Exception:
11967
- md_size = "Unknown"
11968
- master_sizes[master_path] = md_size
12773
+ bn = os.path.basename(mp)
12774
+ # Only consider MasterDark_* files (cheap gate)
12775
+ if not bn.startswith("MasterDark_"):
12776
+ continue
12777
+
12778
+ md = self._get_master_dark_meta(mp)
12779
+ md_size = md.get("size") or "Unknown"
12780
+ if md_size != image_size:
12781
+ continue
12782
+
12783
+ md_exp = md.get("exp")
12784
+ if md_exp is None:
12785
+ continue
12786
+
12787
+ # exposure closeness
12788
+ exp_diff = abs(float(md_exp) - float(exposure_time))
11969
12789
 
11970
- if md_size == image_size:
11971
- diff = abs(master_dark_exposure_time - exposure_time)
11972
- if diff < best_dark_diff:
11973
- best_dark_diff = diff
11974
- best_dark_match = master_path
12790
+ # session preference: exact match beats mismatch
12791
+ md_sess = (md.get("session") or "Default").strip()
12792
+ sess_mismatch = 0 if md_sess == session_name else 1
11975
12793
 
11976
- dark_choice = os.path.basename(best_dark_match) if best_dark_match else ("None" if not curr_dark else curr_dark)
12794
+ # temperature closeness (if both known)
12795
+ md_temp = md.get("temp")
12796
+ if (l_temp is not None) and (md_temp is not None):
12797
+ temp_diff = abs(float(md_temp) - float(l_temp))
12798
+ temp_unknown = 0
12799
+ else:
12800
+ # if light has temp but dark doesn't (or vice versa), penalize
12801
+ temp_diff = 9999.0
12802
+ temp_unknown = 1
12803
+
12804
+ # Score tuple: lower is better
12805
+ # Priority: session match -> exposure diff -> temp availability -> temp diff
12806
+ score = (sess_mismatch, exp_diff, temp_unknown, temp_diff)
12807
+
12808
+ if best_score is None or score < best_score:
12809
+ best_score = score
12810
+ best_path = mp
12811
+
12812
+ dark_choice = os.path.basename(best_path) if best_path else ("None" if not curr_dark else curr_dark)
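Because the score is an ordinary tuple, Python's lexicographic comparison gives the stated priority for free; a toy example with hypothetical candidates:

candidates = {
    "dark_A.fit": (0, 0.0, 1, 9999.0),  # same session, exact exposure, no temp info
    "dark_B.fit": (0, 0.0, 0, 1.5),     # same session, exact exposure, temp within 1.5 C
    "dark_C.fit": (1, 0.0, 0, 0.1),     # other session, exact exposure, temp within 0.1 C
}
best = min(candidates, key=candidates.get)   # -> "dark_B.fit"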
11977
12813
 
11978
12814
  # ---------- FLAT RESOLUTION ----------
11979
12815
  flat_key_full = f"{filter_name_raw} - {exposure_text}"
@@ -12109,22 +12945,57 @@ class StackingSuiteDialog(QDialog):
12109
12945
 
12110
12946
 
12111
12947
  def override_selected_master_dark(self):
12112
- """ Override Dark for selected Light exposure group or individual files. """
12948
+ """Override Dark for selected Light exposure group or individual files."""
12113
12949
  selected_items = self.light_tree.selectedItems()
12114
12950
  if not selected_items:
12115
12951
  print("⚠️ No light item selected for dark frame override.")
12116
12952
  return
12117
12953
 
12118
- file_path, _ = QFileDialog.getOpenFileName(self, "Select Master Dark", "", "FITS Files (*.fits *.fit)")
12954
+ # --- pick a good starting directory ---
12955
+ last_dir = self.settings.value("stacking/last_master_dark_dir", "", type=str) if hasattr(self, "settings") else ""
12956
+ if not last_dir:
12957
+ # try stacking dir
12958
+ last_dir = getattr(self, "stacking_directory", "") or ""
12959
+
12960
+ # try selected leaf path folder (best UX)
12961
+ try:
12962
+ it0 = selected_items[0]
12963
+ # leaf stores path in UserRole, groups do not
12964
+ p0 = it0.data(0, Qt.ItemDataRole.UserRole)
12965
+ if isinstance(p0, str) and os.path.exists(p0):
12966
+ last_dir = os.path.dirname(p0)
12967
+ except Exception:
12968
+ pass
12969
+
12970
+ if not last_dir:
12971
+ last_dir = os.path.expanduser("~")
12972
+
12973
+ file_path, _ = QFileDialog.getOpenFileName(
12974
+ self,
12975
+ "Select Master Dark",
12976
+ last_dir,
12977
+ "Master Calibration (*.fits *.fit *.xisf);;All Files (*)"
12978
+ )
12119
12979
  if not file_path:
12120
12980
  return
12121
12981
 
12982
+ # remember for next time
12983
+ try:
12984
+ if hasattr(self, "settings"):
12985
+ self.settings.setValue("stacking/last_master_dark_dir", os.path.dirname(file_path))
12986
+ except Exception:
12987
+ pass
12988
+
12989
+ # Ensure dict exists
12990
+ if not hasattr(self, "manual_dark_overrides") or self.manual_dark_overrides is None:
12991
+ self.manual_dark_overrides = {}
12992
+
12122
12993
  for item in selected_items:
12123
- # If the user clicked a group (exposure row), push override to all leaves:
12994
+ # If the user clicked an exposure row under a filter
12124
12995
  if item.parent() and item.childCount() > 0:
12125
- # exposure row under a filter
12126
12996
  filter_name = item.parent().text(0)
12127
12997
  exposure_text = item.text(0)
12998
+
12128
12999
  # store override under BOTH keys
12129
13000
  self.manual_dark_overrides[f"{filter_name} - {exposure_text}"] = file_path
12130
13001
  self.manual_dark_overrides[exposure_text] = file_path
@@ -12132,17 +13003,20 @@ class StackingSuiteDialog(QDialog):
12132
13003
  for i in range(item.childCount()):
12133
13004
  leaf = item.child(i)
12134
13005
  leaf.setText(2, os.path.basename(file_path))
12135
- # If the user clicked a leaf, just set that leaf and still store under both keys
13006
+
13007
+ # If the user clicked a leaf under an exposure row
12136
13008
  elif item.parent() and item.parent().parent():
12137
13009
  exposure_item = item.parent()
12138
13010
  filter_name = exposure_item.parent().text(0)
12139
13011
  exposure_text = exposure_item.text(0)
13012
+
12140
13013
  self.manual_dark_overrides[f"{filter_name} - {exposure_text}"] = file_path
12141
13014
  self.manual_dark_overrides[exposure_text] = file_path
12142
13015
  item.setText(2, os.path.basename(file_path))
12143
13016
 
12144
13017
  print("βœ… DEBUG: Light Dark override applied.")
12145
13018
 
13019
+
12146
13020
  def _auto_pick_master_dark(self, image_size: str, exposure_time: float):
12147
13021
  best_path, best_diff = None, float("inf")
12148
13022
  for key, path in self.master_files.items():
@@ -12525,6 +13399,7 @@ class StackingSuiteDialog(QDialog):
12525
13399
 
12526
13400
  # ---------- LOAD LIGHT ----------
12527
13401
  light_data, hdr, bit_depth, is_mono = load_image(light_file)
13402
+ #_print_stats("LIGHT raw", light_data, bit_depth=bit_depth, hdr=hdr)
12528
13403
  if light_data is None or hdr is None:
12529
13404
  self.update_status(self.tr(f"❌ ERROR: Failed to load {os.path.basename(light_file)}"))
12530
13405
  continue
@@ -12549,7 +13424,10 @@ class StackingSuiteDialog(QDialog):
12549
13424
 
12550
13425
  # ---------- APPLY DARK (if resolved) ----------
12551
13426
  if master_dark_path:
12552
- dark_data, _, _, dark_is_mono = load_image(master_dark_path)
13427
+ dark_data, _, dark_bit_depth, dark_is_mono = load_image(master_dark_path)
13428
+ #_print_stats("DARK raw", dark_data, bit_depth=dark_bit_depth)
13429
+ dark_data = _maybe_normalize_16bit_float(dark_data, name=os.path.basename(master_dark_path))
13430
+ #_print_stats("DARK normalized", dark_data, bit_depth=dark_bit_depth)
12553
13431
  if dark_data is not None:
12554
13432
  if not dark_is_mono and dark_data.ndim == 3 and dark_data.shape[-1] == 3:
12555
13433
  dark_data = dark_data.transpose(2, 0, 1) # HWC -> CHW
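`_maybe_normalize_16bit_float` (used for both the dark here and the flat below) is not part of this hunk; a hedged sketch of the assumed behavior — rescale a float image that still carries 16-bit ADU values into [0, 1]:

import numpy as np

def _maybe_normalize_16bit_float_sketch(img, name: str = ""):
    if img is None:
        return None
    img = np.asarray(img, dtype=np.float32)
    if float(img.max()) > 1.5:      # heuristic: looks like 0..65535 data, not 0..1
        img = img / 65535.0
    return img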
@@ -12565,7 +13443,10 @@ class StackingSuiteDialog(QDialog):
12565
13443
 
12566
13444
  # ---------- APPLY FLAT (if resolved) ----------
12567
13445
  if master_flat_path:
12568
- flat_data, _, _, flat_is_mono = load_image(master_flat_path)
13446
+ flat_data, _, flat_bit_depth, flat_is_mono = load_image(master_flat_path)
13447
+ #_print_stats("FLAT raw", flat_data, bit_depth=flat_bit_depth)
13448
+ flat_data = _maybe_normalize_16bit_float(flat_data, name=os.path.basename(master_flat_path))
13449
+ #_print_stats("FLAT normalized", flat_data, bit_depth=flat_bit_depth)
12569
13450
  if flat_data is not None:
12570
13451
 
12571
13452
  # Make flat layout match your working light layout:
@@ -12679,13 +13560,19 @@ class StackingSuiteDialog(QDialog):
12679
13560
  max_val = float(np.max(light_data))
12680
13561
  self.update_status(self.tr(f"Before saving: min = {min_val:.4f}, max = {max_val:.4f}"))
12681
13562
  print(f"Before saving: min = {min_val:.4f}, max = {max_val:.4f}")
13563
+
13564
+ _warn_if_units_mismatch(light_data, dark_data if master_dark_path else None, flat_data if master_flat_path else None)
13565
+ _print_stats("LIGHT final", light_data)
12682
13566
  QApplication.processEvents()
12683
-
12684
13567
  # Annotate header
12685
13568
  try:
12686
- hdr['HISTORY'] = 'Calibrated: bias/dark sub, flat division'
12687
- hdr['CALMIN'] = (min_val, 'Min pixel before save (float)')
12688
- hdr['CALMAX'] = (max_val, 'Max pixel before save (float)')
13569
+ if hasattr(hdr, "add_history"):
13570
+ hdr.add_history("Calibrated: bias/dark sub, flat division")
13571
+ else:
13572
+ hdr["HISTORY"] = "Calibrated: bias/dark sub, flat division"
13573
+
13574
+ hdr["CALMIN"] = (min_val, "Min pixel before save (float)")
13575
+ hdr["CALMAX"] = (max_val, "Max pixel before save (float)")
12689
13576
  except Exception:
12690
13577
  pass
12691
13578
 
@@ -13670,23 +14557,23 @@ class StackingSuiteDialog(QDialog):
13670
14557
  self.update_status(self.tr("🧹 Doing a little tidying up..."))
13671
14558
  user_ref_locked = bool(getattr(self, "_user_ref_locked", False))
13672
14559
 
13673
- # Only clear derived geometry/maps when NOT locked
14560
+ # ALWAYS clear derived geometry/maps for this run (mapping is run-specific)
14561
+ self._norm_target_hw = None
14562
+ self._orig2norm = {}
14563
+
14564
+ # Only clear the UI reference label when NOT locked
13674
14565
  if not user_ref_locked:
13675
- self._norm_target_hw = None
13676
- self._orig2norm = {}
13677
14566
  try:
13678
14567
  if hasattr(self, "ref_frame_path") and self.ref_frame_path:
13679
14568
  self.ref_frame_path.setText("Auto (not set)")
13680
14569
  except Exception:
13681
14570
  pass
13682
14571
  else:
13683
- # Keep the UI showing the user’s chosen ref (basename for display)
13684
14572
  try:
13685
14573
  if hasattr(self, "ref_frame_path") and self.ref_frame_path and self.reference_frame:
13686
14574
  self.ref_frame_path.setText(os.path.basename(self.reference_frame))
13687
14575
  except Exception:
13688
14576
  pass
13689
-
13690
14577
  # 🚫 Do NOT remove persisted user ref here; that defeats locking.
13691
14578
  # (No settings.remove() and no reference_frame = None if locked)
13692
14579
 
@@ -14571,7 +15458,28 @@ class StackingSuiteDialog(QDialog):
14571
15458
 
14572
15459
  from os import path
14573
15460
  ref_path = path.normpath(self.reference_frame)
14574
- self.update_status(self.tr(f"πŸ“Œ Reference for alignment (verbatim): {ref_path}"))
15461
+ from os import path
15462
+
15463
+ # Prefer the normalized FIT reference if we produced one
15464
+ ref_key = path.normcase(path.normpath(self.reference_frame))
15465
+ ref_norm = self._orig2norm.get(ref_key)
15466
+
15467
+ # If mapping missing, attempt the predictable filename in norm_dir
15468
+ if not ref_norm:
15469
+ base = os.path.basename(self.reference_frame)
15470
+ if base.lower().endswith(".fits"):
15471
+ n_name = base[:-5] + "_n.fit"
15472
+ elif base.lower().endswith(".fit"):
15473
+ n_name = base[:-4] + "_n.fit"
15474
+ else:
15475
+ n_name = base + "_n.fit"
15476
+ candidate = path.normpath(path.join(norm_dir, n_name))
15477
+ if path.exists(candidate):
15478
+ ref_norm = candidate
15479
+
15480
+ ref_path = path.normpath(ref_norm or self.reference_frame)
15481
+
15482
+ self.update_status(self.tr(f"πŸ“Œ Reference for alignment: {ref_path}"))
14575
15483
  if not path.exists(ref_path):
14576
15484
  self.update_status(self.tr(f"🚨 Reference file does not exist: {ref_path}"))
14577
15485
  return
@@ -14587,6 +15495,14 @@ class StackingSuiteDialog(QDialog):
14587
15495
 
14588
15496
  normalized_files = [path.normpath(p) for p in normalized_files]
14589
15497
 
15498
+ ref_key = path.normcase(path.normpath(self.reference_frame))
15499
+ ref_path = self._orig2norm.get(ref_key, path.normpath(self.reference_frame))
15500
+
15501
+ self.update_status(self.tr(f"πŸ“Œ Reference for alignment (normalized if available): {ref_path}"))
15502
+ if not path.exists(ref_path):
15503
+ self.update_status(self.tr(f"🚨 Reference file does not exist: {ref_path}"))
15504
+ return
15505
+
14590
15506
  self.alignment_thread = StarRegistrationThread(
14591
15507
  ref_path,
14592
15508
  normalized_files,
@@ -15192,6 +16108,41 @@ class StackingSuiteDialog(QDialog):
15192
16108
  # Threshold is only used in normal mode
15193
16109
  accept_thresh = float(self.settings.value("stacking/accept_shift_px", 2.0, type=float))
15194
16110
 
16111
+ def _mf_ref_path_for_masks() -> str | None:
16112
+ """
16113
+ Return the best reference path for MFDeconv star masks:
16114
+ aligned FITS if possible, else normalized FITS, else original.
16115
+ """
16116
+ if not getattr(self, "reference_frame", None):
16117
+ return None
16118
+
16119
+ from os import path
16120
+ ref_orig = path.normpath(self.reference_frame)
16121
+ ref_key = path.normcase(ref_orig)
16122
+
16123
+ # original -> normalized
16124
+ ref_norm = self._orig2norm.get(ref_key)
16125
+
16126
+ # normalized -> aligned
16127
+ ref_aligned = None
16128
+ if ref_norm:
16129
+ ref_aligned = self.valid_transforms.get(path.normpath(ref_norm))
16130
+
16131
+ # If we couldn’t map via orig->norm (e.g. user picked a normalized path already)
16132
+ if not ref_norm and ref_orig in self.valid_transforms:
16133
+ ref_norm = ref_orig
16134
+ ref_aligned = self.valid_transforms.get(ref_norm)
16135
+
16136
+ # Prefer aligned if it exists on disk
16137
+ if ref_aligned and path.exists(ref_aligned):
16138
+ return ref_aligned
16139
+ if ref_norm and path.exists(ref_norm):
16140
+ return ref_norm
16141
+ if path.exists(ref_orig):
16142
+ return ref_orig
16143
+ return None
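In short, the helper walks original → normalized → aligned and returns the most processed path that exists on disk; with hypothetical mappings:

# self._orig2norm       = {"lights/ref.fit": "normalized/ref_n.fit"}
# self.valid_transforms = {"normalized/ref_n.fit": "aligned/ref_n_r.fit"}
# -> "aligned/ref_n_r.fit" if it exists, else "normalized/ref_n.fit",
#    else "lights/ref.fit", else None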
16144
+
16145
+
15195
16146
  def _accept(k: str) -> bool:
15196
16147
  """Accept criteria for a frame."""
15197
16148
  if all_transforms.get(k) is None:
@@ -15575,7 +16526,9 @@ class StackingSuiteDialog(QDialog):
15575
16526
  }
15576
16527
 
15577
16528
  self._mf_thread = QThread(self)
15578
- star_mask_ref = self.reference_frame if use_star_masks else None
16529
+ star_mask_ref = _mf_ref_path_for_masks() if use_star_masks else None
16530
+ if use_star_masks:
16531
+ self.update_status(self.tr(f"🌟 MFDeconv star-mask reference β†’ {star_mask_ref or '(none)'}"))
15579
16532
 
15580
16533
  # ── choose engine plainly (Normal / cuDNN-free / High Octane) ─────────────
15581
16534
  # Expect a setting saved by your radio buttons: "normal" | "cudnn" | "sport"
@@ -16030,6 +16983,10 @@ class StackingSuiteDialog(QDialog):
16030
16983
  hdr_orig["CREATOR"] = "SetiAstroSuite"
16031
16984
  hdr_orig["DATE-OBS"] = datetime.utcnow().isoformat()
16032
16985
 
16986
+ n_frames_group = len(file_list)
16987
+ hdr_orig["NCOMBINE"] = (int(n_frames_group), "Number of frames combined")
16988
+ hdr_orig["NSTACK"] = (int(n_frames_group), "Alias of NCOMBINE (SetiAstro)")
16989
+
16033
16990
  is_mono_orig = (integrated_image.ndim == 2)
16034
16991
  if is_mono_orig:
16035
16992
  hdr_orig["NAXIS"] = 2
@@ -16149,6 +17106,8 @@ class StackingSuiteDialog(QDialog):
16149
17106
  scale=1.0,
16150
17107
  rect_override=group_rect if group_rect is not None else global_rect
16151
17108
  )
17109
+ hdr_crop["NCOMBINE"] = (int(n_frames_group), "Number of frames combined")
17110
+ hdr_crop["NSTACK"] = (int(n_frames_group), "Alias of NCOMBINE (SetiAstro)")
16152
17111
  is_mono_crop = (cropped_img.ndim == 2)
16153
17112
  Hc, Wc = (cropped_img.shape[:2] if cropped_img.ndim >= 2 else (H, W))
16154
17113
  display_group_crop = self._label_with_dims(group_key, Wc, Hc)
@@ -16292,6 +17251,12 @@ class StackingSuiteDialog(QDialog):
16292
17251
  algo_override=COMET_ALGO # << comet-friendly reducer
16293
17252
  )
16294
17253
 
17254
+ n_usable = int(len(usable))
17255
+ ref_header_c = ref_header_c or ref_header or fits.Header()
17256
+ ref_header_c["NCOMBINE"] = (n_usable, "Number of frames combined (comet)")
17257
+ ref_header_c["NSTACK"] = (n_usable, "Alias of NCOMBINE (SetiAstro)")
17258
+ ref_header_c["COMETFR"] = (n_usable, "Frames used for comet-aligned stack")
17259
+
16295
17260
  # Save CometOnly
16296
17261
  Hc, Wc = comet_only.shape[:2]
16297
17262
  display_group_c = self._label_with_dims(group_key, Wc, Hc)
@@ -16316,6 +17281,10 @@ class StackingSuiteDialog(QDialog):
16316
17281
  scale=1.0,
16317
17282
  rect_override=group_rect if group_rect is not None else global_rect
16318
17283
  )
17284
+ comet_only_crop, hdr_c_crop = self._apply_autocrop(...)
17285
+ hdr_c_crop["NCOMBINE"] = (n_usable, "Number of frames combined (comet)")
17286
+ hdr_c_crop["NSTACK"] = (n_usable, "Alias of NCOMBINE (SetiAstro)")
17287
+ hdr_c_crop["COMETFR"] = (n_usable, "Frames used for comet-aligned stack")
16319
17288
  Hcc, Wcc = comet_only_crop.shape[:2]
16320
17289
  display_group_cc = self._label_with_dims(group_key, Wcc, Hcc)
16321
17290
  comet_path_crop = self._build_out(
@@ -16903,246 +17872,6 @@ class StackingSuiteDialog(QDialog):
16903
17872
  views[p] = np.load(npy, mmap_mode="r") # returns numpy.memmap
16904
17873
  return views
16905
17874
 
16906
-
16907
- def stack_registered_images_chunked(
16908
- self,
16909
- grouped_files,
16910
- frame_weights,
16911
- chunk_height=2048,
16912
- chunk_width=2048
16913
- ):
16914
- self.update_status(self.tr(f"βœ… Chunked stacking {len(grouped_files)} group(s)..."))
16915
- QApplication.processEvents()
16916
-
16917
- all_rejection_coords = []
16918
-
16919
- for group_key, file_list in grouped_files.items():
16920
- num_files = len(file_list)
16921
- self.update_status(self.tr(f"πŸ“Š Group '{group_key}' has {num_files} aligned file(s)."))
16922
- QApplication.processEvents()
16923
- if num_files < 2:
16924
- self.update_status(self.tr(f"⚠️ Group '{group_key}' does not have enough frames to stack."))
16925
- continue
16926
-
16927
- # Reference shape/header (unchanged)
16928
- ref_file = file_list[0]
16929
- if not os.path.exists(ref_file):
16930
- self.update_status(self.tr(f"⚠️ Reference file '{ref_file}' not found, skipping group."))
16931
- continue
16932
-
16933
- ref_data, ref_header, _, _ = load_image(ref_file)
16934
- if ref_data is None:
16935
- self.update_status(self.tr(f"⚠️ Could not load reference '{ref_file}', skipping group."))
16936
- continue
16937
-
16938
- is_color = (ref_data.ndim == 3 and ref_data.shape[2] == 3)
16939
- height, width = ref_data.shape[:2]
16940
- channels = 3 if is_color else 1
16941
-
16942
- # Final output memmap (unchanged)
16943
- memmap_path = self._build_out(self.stacking_directory, f"chunked_{group_key}", "dat")
16944
- final_stacked = np.memmap(memmap_path, dtype=np.float32, mode='w+', shape=(height, width, channels))
16945
-
16946
- # Valid files + weights
16947
- aligned_paths, weights_list = [], []
16948
- for fpath in file_list:
16949
- if os.path.exists(fpath):
16950
- aligned_paths.append(fpath)
16951
- weights_list.append(frame_weights.get(fpath, 1.0))
16952
- else:
16953
- self.update_status(self.tr(f"⚠️ File not found: {fpath}, skipping."))
16954
- if len(aligned_paths) < 2:
16955
- self.update_status(self.tr(f"⚠️ Not enough valid frames in group '{group_key}' to stack."))
16956
- continue
16957
-
16958
- weights_list = np.array(weights_list, dtype=np.float32)
16959
-
16960
- # ⬇️ NEW: open read-only memmaps for all aligned frames (float32 [0..1], HxWxC)
16961
- mm_views = self._open_memmaps_readonly(aligned_paths)
16962
-
16963
- self.update_status(self.tr(f"πŸ“Š Stacking group '{group_key}' with {self.rejection_algorithm}"))
16964
- QApplication.processEvents()
16965
-
16966
- rejection_coords = []
16967
- N = len(aligned_paths)
16968
- DTYPE = self._dtype()
16969
- pref_h = self.chunk_height
16970
- pref_w = self.chunk_width
16971
-
16972
- try:
16973
- chunk_h, chunk_w = compute_safe_chunk(height, width, N, channels, DTYPE, pref_h, pref_w)
16974
- self.update_status(self.tr(f"πŸ”§ Using chunk size {chunk_h}Γ—{chunk_w} for {self._dtype()}"))
16975
- except MemoryError as e:
16976
- self.update_status(self.tr(f"⚠️ {e}"))
16977
- return None, {}, None
16978
-
16979
- # Tile loop (same structure, but tile loading reads from memmaps)
16980
- from concurrent.futures import ThreadPoolExecutor, as_completed
16981
- LOADER_WORKERS = min(max(2, (os.cpu_count() or 4) // 2), 8) # tuned for memory bw
16982
-
16983
- for y_start in range(0, height, chunk_h):
16984
- y_end = min(y_start + chunk_h, height)
16985
- tile_h = y_end - y_start
16986
-
16987
- for x_start in range(0, width, chunk_w):
16988
- x_end = min(x_start + chunk_w, width)
16989
- tile_w = x_end - x_start
16990
-
16991
- # Preallocate tile stack
16992
- tile_stack = np.empty((N, tile_h, tile_w, channels), dtype=np.float32)
16993
-
16994
- # ⬇️ NEW: fill tile_stack from the memmaps (parallel copy)
16995
- def _copy_one(i, path):
16996
- v = mm_views[path][y_start:y_end, x_start:x_end] # view on disk
16997
- if v.ndim == 2:
16998
- # mono memmap stored as (H,W,1); but if legacy mono npy exists as (H,W),
16999
- # make it (H,W,1) here:
17000
- vv = v[..., None]
17001
- else:
17002
- vv = v
17003
- if vv.shape[2] == 1 and channels == 3:
17004
- vv = np.repeat(vv, 3, axis=2)
17005
- tile_stack[i] = vv
17006
-
17007
- with ThreadPoolExecutor(max_workers=LOADER_WORKERS) as exe:
17008
- futs = {exe.submit(_copy_one, i, p): i for i, p in enumerate(aligned_paths)}
17009
- for _ in as_completed(futs):
17010
- pass
17011
-
17012
- # Rejection (unchanged – uses your Numba kernels)
17013
- algo = self.rejection_algorithm
17014
- if algo == "Simple Median (No Rejection)":
17015
- tile_result = np.median(tile_stack, axis=0)
17016
- tile_rej_map = np.zeros(tile_stack.shape[1:3], dtype=np.bool_)
17017
- elif algo == "Simple Average (No Rejection)":
17018
- tile_result = np.average(tile_stack, axis=0, weights=weights_list)
17019
- tile_rej_map = np.zeros(tile_stack.shape[1:3], dtype=np.bool_)
17020
- elif algo == "Weighted Windsorized Sigma Clipping":
17021
- tile_result, tile_rej_map = windsorized_sigma_clip_weighted(
17022
- tile_stack, weights_list, lower=self.sigma_low, upper=self.sigma_high
17023
- )
17024
- elif algo == "Kappa-Sigma Clipping":
17025
- tile_result, tile_rej_map = kappa_sigma_clip_weighted(
17026
- tile_stack, weights_list, kappa=self.kappa, iterations=self.iterations
17027
- )
17028
- elif algo == "Trimmed Mean":
17029
- tile_result, tile_rej_map = trimmed_mean_weighted(
17030
- tile_stack, weights_list, trim_fraction=self.trim_fraction
17031
- )
17032
- elif algo == "Extreme Studentized Deviate (ESD)":
17033
- tile_result, tile_rej_map = esd_clip_weighted(
17034
- tile_stack, weights_list, threshold=self.esd_threshold
17035
- )
17036
- elif algo == "Biweight Estimator":
17037
- tile_result, tile_rej_map = biweight_location_weighted(
17038
- tile_stack, weights_list, tuning_constant=self.biweight_constant
17039
- )
17040
- elif algo == "Modified Z-Score Clipping":
17041
- tile_result, tile_rej_map = modified_zscore_clip_weighted(
17042
- tile_stack, weights_list, threshold=self.modz_threshold
17043
- )
17044
- elif algo == "Max Value":
17045
- tile_result, tile_rej_map = max_value_stack(
17046
- tile_stack, weights_list
17047
- )
17048
- else:
17049
- tile_result, tile_rej_map = windsorized_sigma_clip_weighted(
17050
- tile_stack, weights_list, lower=self.sigma_low, upper=self.sigma_high
17051
- )
17052
-
17053
- # Ensure tile_result has correct shape
17054
- if tile_result.ndim == 2:
17055
- tile_result = tile_result[:, :, None]
17056
- expected_shape = (tile_h, tile_w, channels)
17057
- if tile_result.shape != expected_shape:
17058
- if tile_result.shape[2] == 0:
17059
- tile_result = np.zeros(expected_shape, dtype=np.float32)
17060
- elif tile_result.shape[:2] == (tile_h, tile_w):
17061
- if tile_result.shape[2] > channels:
17062
- tile_result = tile_result[:, :, :channels]
17063
- else:
17064
- tile_result = np.repeat(tile_result, channels, axis=2)[:, :, :channels]
17065
-
17066
- # Commit tile
17067
- final_stacked[y_start:y_end, x_start:x_end, :] = tile_result
17068
-
17069
- # Collect per-tile rejection coords (unchanged logic)
17070
- if tile_rej_map.ndim == 3: # (N, tile_h, tile_w)
17071
- combined_rej = np.any(tile_rej_map, axis=0)
17072
- elif tile_rej_map.ndim == 4: # (N, tile_h, tile_w, C)
17073
- combined_rej = np.any(tile_rej_map, axis=0)
17074
- combined_rej = np.any(combined_rej, axis=-1)
17075
- else:
17076
- combined_rej = np.zeros((tile_h, tile_w), dtype=np.bool_)
17077
-
17078
- ys_tile, xs_tile = np.where(combined_rej)
17079
- for dy, dx in zip(ys_tile, xs_tile):
17080
- rejection_coords.append((x_start + dx, y_start + dy))
17081
-
17082
- # Finish/save (unchanged from your version) …
17083
- final_array = np.array(final_stacked)
17084
- del final_stacked
17085
-
17086
- final_array = self._normalize_stack_01(final_array)
17087
-
17088
- if final_array.ndim == 3 and final_array.shape[-1] == 1:
17089
- final_array = final_array[..., 0]
17090
- is_mono = (final_array.ndim == 2)
17091
-
17092
- if ref_header is None:
17093
- ref_header = fits.Header()
17094
- ref_header["IMAGETYP"] = "MASTER STACK"
17095
- ref_header["BITPIX"] = -32
17096
- ref_header["STACKED"] = (True, "Stacked using chunked approach")
17097
- ref_header["CREATOR"] = "SetiAstroSuite"
17098
- ref_header["DATE-OBS"] = datetime.utcnow().isoformat()
17099
- if is_mono:
17100
- ref_header["NAXIS"] = 2
17101
- ref_header["NAXIS1"] = final_array.shape[1]
17102
- ref_header["NAXIS2"] = final_array.shape[0]
17103
- if "NAXIS3" in ref_header: del ref_header["NAXIS3"]
17104
- else:
17105
- ref_header["NAXIS"] = 3
17106
- ref_header["NAXIS1"] = final_array.shape[1]
17107
- ref_header["NAXIS2"] = final_array.shape[0]
17108
- ref_header["NAXIS3"] = 3
17109
-
17110
- output_stem = f"MasterLight_{group_key}_{len(aligned_paths)}stacked"
17111
- output_path = self._build_out(self.stacking_directory, output_stem, "fit")
17112
-
17113
- save_image(
17114
- img_array=final_array,
17115
- filename=output_path,
17116
- original_format="fit",
17117
- bit_depth="32-bit floating point",
17118
- original_header=ref_header,
17119
- is_mono=is_mono
17120
- )
17121
-
17122
- self.update_status(self.tr(f"βœ… Group '{group_key}' stacked {len(aligned_paths)} frame(s)! Saved: {output_path}"))
17123
-
17124
- print(f"βœ… Master Light saved for group '{group_key}': {output_path}")
17125
-
17126
- # Optionally, you might want to store or log 'rejection_coords' (here appended to all_rejection_coords)
17127
- all_rejection_coords.extend(rejection_coords)
17128
-
17129
- # Clean up memmap file
17130
- try:
17131
- os.remove(memmap_path)
17132
- except OSError:
17133
- pass
17134
-
17135
- QMessageBox.information(
17136
- self,
17137
- "Stacking Complete",
17138
- f"All stacking finished successfully.\n"
17139
- f"Frames per group:\n" +
17140
- "\n".join([f"{group_key}: {len(files)} frame(s)" for group_key, files in grouped_files.items()])
17141
- )
17142
-
17143
- # Optionally, you could return the global rejection coordinate list.
17144
- return all_rejection_coords
17145
-
17146
17875
  def _start_after_align_worker(self, aligned_light_files: dict[str, list[str]]):
17147
17876
  # Snapshot UI settings
17148
17877
  if getattr(self, "_suppress_normal_integration_once", False):
@@ -17316,7 +18045,37 @@ class StackingSuiteDialog(QDialog):
17316
18045
 
17317
18046
  # Thread + worker
17318
18047
  self._mf_thread = QThread(self)
17319
- star_mask_ref = self.reference_frame if use_star_masks else None
18048
+
18049
+ def _pick_mf_ref_from_frames(frames: list[str]) -> str | None:
18050
+ """Pick a reference path for MFDeconv masks from the aligned frames list."""
18051
+ from os import path
18052
+ if not frames:
18053
+ return None
18054
+
18055
+ # Prefer the weighted-best frame if weights exist
18056
+ w = getattr(self, "frame_weights", None) or {}
18057
+ best = None
18058
+ bestw = -1.0
18059
+ for p in frames:
18060
+ pn = path.normpath(p)
18061
+ if not path.exists(pn):
18062
+ continue
18063
+ ww = float(w.get(pn, w.get(p, 0.0)) or 0.0)
18064
+ if ww > bestw:
18065
+ bestw, best = ww, pn
18066
+
18067
+ # Otherwise fall back to first existing frame
18068
+ if best:
18069
+ return best
18070
+ for p in frames:
18071
+ pn = path.normpath(p)
18072
+ if path.exists(pn):
18073
+ return pn
18074
+ return None
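Usage sketch with hypothetical frames and weights — the existing frame with the highest weight becomes the star-mask reference:

# self.frame_weights = {"a_n_r.fit": 0.8, "b_n_r.fit": 1.3}
# _pick_mf_ref_from_frames(["a_n_r.fit", "b_n_r.fit"])  -> "b_n_r.fit"
#   (assuming both files exist; otherwise the first existing frame is returned)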
18075
+
18076
+ star_mask_ref = _pick_mf_ref_from_frames(frames) if use_star_masks else None
18077
+ if use_star_masks:
18078
+ self.update_status(self.tr(f"🌟 MFDeconv star-mask reference β†’ {star_mask_ref or '(none)'}"))
17320
18079
 
17321
18080
  # ── choose engine plainly (Normal / cuDNN-free / High Octane) ─────────────
17322
18081
  # Expect a setting saved by your radio buttons: "normal" | "cudnn" | "sport"
@@ -17461,6 +18220,87 @@ class StackingSuiteDialog(QDialog):
17461
18220
 
17462
18221
  self.update_status(self.tr(f"πŸ“Š Found {len(cand)} aligned/normalized frames. Measuring in parallel previews…"))
17463
18222
 
18223
+ # ─────────────────────────────────────────────────────────────────────
18224
+ # XISF safety: convert any .xisf to float32 FITS once up-front so the
18225
+ # downstream integration pipeline is guaranteed to be FITS-based.
18226
+ # ─────────────────────────────────────────────────────────────────────
18227
+ prep_dir = os.path.join(self.stacking_directory, "Prepared_Registered")
18228
+ os.makedirs(prep_dir, exist_ok=True)
18229
+
18230
+ orig2prep = {} # optional, for debugging or later mapping
18231
+
18232
+ def _prep_path_for(fp: str) -> str:
18233
+ base = os.path.basename(fp)
18234
+ stem, _ext = os.path.splitext(base)
18235
+ return os.path.normpath(os.path.join(prep_dir, stem + "_prep.fit"))
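So a hypothetical registered XISF frame maps into the Prepared_Registered folder like this:

# _prep_path_for("/data/run1/ngc7000_0001_n_r.xisf")
# -> <stacking_directory>/Prepared_Registered/ngc7000_0001_n_r_prep.fit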
18236
+
18237
+ prepared = []
18238
+ for fp in cand:
18239
+ ext = os.path.splitext(fp)[1].lower()
18240
+ if ext != ".xisf":
18241
+ prepared.append(fp)
18242
+ continue
18243
+
18244
+ outp = _prep_path_for(fp)
18245
+
18246
+ # reuse if already created this run
18247
+ if os.path.exists(outp):
18248
+ orig2prep[os.path.normcase(os.path.normpath(fp))] = outp
18249
+ prepared.append(outp)
18250
+ continue
18251
+
18252
+ try:
18253
+ img, hdr = self._load_image_any(fp) # must support XISF
18254
+ if img is None:
18255
+ self.update_status(self.tr(f"⚠️ Could not read XISF: {fp}"))
18256
+ continue
18257
+
18258
+ img = _to_writable_f32(img)
18259
+ if img.ndim == 3 and img.shape[-1] == 1:
18260
+ img = np.squeeze(img, axis=-1)
18261
+
18262
+ # Minimal header: keep what you can if hdr is a fits.Header
18263
+ try:
18264
+ h = hdr if isinstance(hdr, fits.Header) else fits.Header()
18265
+ except Exception:
18266
+ h = fits.Header()
18267
+
18268
+ h["SAS_PREP"] = (True, "Prepared from XISF for integration")
18269
+ h["SRCFILE"] = (os.path.basename(fp), "Original source filename")
18270
+ if isinstance(img, np.ndarray) and img.ndim == 3 and img.shape[-1] == 3:
18271
+ h["DEBAYERED"] = (True, "Color frame")
18272
+ else:
18273
+ h["DEBAYERED"] = (False, "Mono frame")
18274
+
18275
+ fits.PrimaryHDU(data=img.astype(np.float32), header=h).writeto(outp, overwrite=True)
18276
+
18277
+ orig2prep[os.path.normcase(os.path.normpath(fp))] = outp
18278
+ prepared.append(outp)
18279
+
18280
+ except Exception as e:
18281
+ self.update_status(self.tr(f"⚠️ XISFβ†’FITS prepare failed for {fp}: {e}"))
18282
+
18283
+ # Swap cand to prepared paths
18284
+ cand = prepared
18285
+
18286
+ # Also update light_files to match these prepared paths so the rest of the
18287
+ # pipeline only ever sees FITS paths.
18288
+ prep_map = orig2prep
18289
+ new_light_files = {}
18290
+ for g, lst in self.light_files.items():
18291
+ out = []
18292
+ for p in lst:
18293
+ k = os.path.normcase(os.path.normpath(p))
18294
+ out.append(prep_map.get(k, p))
18295
+ new_light_files[g] = out
18296
+ self.light_files = new_light_files
18297
+
18298
+ # If reference_frame was set and is XISF, redirect it too
18299
+ if getattr(self, "reference_frame", None):
18300
+ k = os.path.normcase(os.path.normpath(self.reference_frame))
18301
+ if k in prep_map:
18302
+ self.reference_frame = prep_map[k]
18303
+
17464
18304
  # 2) Chunked preview measurement (mean + star count/ecc)
17465
18305
  self.frame_weights = {}
17466
18306
  mean_values = {}
@@ -17490,7 +18330,8 @@ class StackingSuiteDialog(QDialog):
17490
18330
  paths_ok = []
17491
18331
 
17492
18332
  def _preview_job(fp: str):
17493
- return _quick_preview_from_path(fp, target_xbin=1, target_ybin=1)
18333
+ # Use the unified reader (FITS/XISF/TIFF/etc) like registration does
18334
+ return self._quick_preview_any(fp, target_xbin=1, target_ybin=1)
17494
18335
 
17495
18336
  with ThreadPoolExecutor(max_workers=max_workers) as ex:
17496
18337
  futs = {ex.submit(_preview_job, fp): fp for fp in chunk}
@@ -18045,6 +18886,10 @@ class StackingSuiteDialog(QDialog):
18045
18886
  hdr_orig["CREATOR"] = "SetiAstroSuite"
18046
18887
  hdr_orig["DATE-OBS"] = datetime.utcnow().isoformat()
18047
18888
 
18889
+ n_frames = int(len(file_list))
18890
+ hdr_orig["NCOMBINE"] = (n_frames, "Number of frames combined")
18891
+ hdr_orig["NSTACK"] = (n_frames, "Alias of NCOMBINE (SetiAstro)")
18892
+
18048
18893
  if final_drizzle.ndim == 2:
18049
18894
  hdr_orig["NAXIS"] = 2
18050
18895
  hdr_orig["NAXIS1"] = final_drizzle.shape[1]
@@ -18074,10 +18919,12 @@ class StackingSuiteDialog(QDialog):
18074
18919
  cropped_drizzle, hdr_crop = self._apply_autocrop(
18075
18920
  final_drizzle,
18076
18921
  file_list,
18077
- hdr.copy() if hdr is not None else fits.Header(),
18922
+ hdr_orig.copy(),
18078
18923
  scale=float(scale_factor),
18079
18924
  rect_override=rect_override
18080
18925
  )
18926
+ hdr_crop["NCOMBINE"] = (n_frames, "Number of frames combined")
18927
+ hdr_crop["NSTACK"] = (n_frames, "Alias of NCOMBINE (SetiAstro)")
18081
18928
  is_mono_crop = (cropped_drizzle.ndim == 2)
18082
18929
  display_group_driz_crop = self._label_with_dims(group_key, cropped_drizzle.shape[1], cropped_drizzle.shape[0])
18083
18930
  base_crop = f"MasterLight_{display_group_driz_crop}_{len(file_list)}stacked_drizzle_autocrop"