setiastrosuitepro 1.6.7__py3-none-any.whl → 1.6.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of setiastrosuitepro might be problematic.

Files changed (37)
  1. setiastro/images/abeicon.svg +16 -0
  2. setiastro/images/cosmic.svg +40 -0
  3. setiastro/images/cosmicsat.svg +24 -0
  4. setiastro/images/graxpert.svg +19 -0
  5. setiastro/images/linearfit.svg +32 -0
  6. setiastro/images/pixelmath.svg +42 -0
  7. setiastro/saspro/_generated/build_info.py +2 -2
  8. setiastro/saspro/add_stars.py +29 -5
  9. setiastro/saspro/blink_comparator_pro.py +74 -24
  10. setiastro/saspro/cosmicclarity.py +125 -18
  11. setiastro/saspro/crop_dialog_pro.py +96 -2
  12. setiastro/saspro/curve_editor_pro.py +60 -39
  13. setiastro/saspro/frequency_separation.py +1159 -208
  14. setiastro/saspro/gui/main_window.py +131 -31
  15. setiastro/saspro/gui/mixins/theme_mixin.py +160 -14
  16. setiastro/saspro/gui/mixins/update_mixin.py +121 -33
  17. setiastro/saspro/imageops/stretch.py +531 -62
  18. setiastro/saspro/layers.py +13 -9
  19. setiastro/saspro/layers_dock.py +183 -3
  20. setiastro/saspro/legacy/numba_utils.py +43 -0
  21. setiastro/saspro/live_stacking.py +158 -70
  22. setiastro/saspro/multiscale_decomp.py +47 -12
  23. setiastro/saspro/numba_utils.py +72 -2
  24. setiastro/saspro/ops/commands.py +18 -18
  25. setiastro/saspro/shortcuts.py +122 -12
  26. setiastro/saspro/signature_insert.py +688 -33
  27. setiastro/saspro/stacking_suite.py +523 -316
  28. setiastro/saspro/stat_stretch.py +688 -130
  29. setiastro/saspro/subwindow.py +302 -71
  30. setiastro/saspro/widgets/common_utilities.py +28 -21
  31. setiastro/saspro/widgets/resource_monitor.py +7 -7
  32. {setiastrosuitepro-1.6.7.dist-info → setiastrosuitepro-1.6.10.dist-info}/METADATA +1 -1
  33. {setiastrosuitepro-1.6.7.dist-info → setiastrosuitepro-1.6.10.dist-info}/RECORD +37 -31
  34. {setiastrosuitepro-1.6.7.dist-info → setiastrosuitepro-1.6.10.dist-info}/WHEEL +0 -0
  35. {setiastrosuitepro-1.6.7.dist-info → setiastrosuitepro-1.6.10.dist-info}/entry_points.txt +0 -0
  36. {setiastrosuitepro-1.6.7.dist-info → setiastrosuitepro-1.6.10.dist-info}/licenses/LICENSE +0 -0
  37. {setiastrosuitepro-1.6.7.dist-info → setiastrosuitepro-1.6.10.dist-info}/licenses/license.txt +0 -0
@@ -16,6 +16,7 @@ import hashlib
  from numpy.lib.format import open_memmap
  import tzlocal
  import weakref
+ import ast
  import re
  import unicodedata
  import math  # used in compute_safe_chunk
@@ -4107,6 +4108,74 @@ def _read_center_patch_via_mmimage(path: str, y0: int, y1: int, x0: int, x1: int
      except Exception:
          pass

+ def _get_key_float(hdr: fits.Header, key: str):
+     try:
+         v = hdr.get(key, None)
+         if v is None:
+             return None
+         # handle strings like "-10.0" or "-10 C"
+         if isinstance(v, str):
+             v = v.strip().replace("C", "").replace("°", "").strip()
+         return float(v)
+     except Exception:
+         return None
+
+ def _collect_temp_stats(file_list: list[str]):
+     ccd = []
+     setp = []
+     n_ccd = 0
+     n_set = 0
+
+     for p in file_list:
+         try:
+             hdr = fits.getheader(p, memmap=True)
+         except Exception:
+             continue
+
+         v1 = _get_key_float(hdr, "CCD-TEMP")
+         v2 = _get_key_float(hdr, "SET-TEMP")
+
+         if v1 is not None:
+             ccd.append(v1); n_ccd += 1
+         if v2 is not None:
+             setp.append(v2); n_set += 1
+
+     def _stats(arr):
+         if not arr:
+             return None, None, None, None
+         a = np.asarray(arr, dtype=np.float32)
+         return float(np.median(a)), float(np.min(a)), float(np.max(a)), float(np.std(a))
+
+     c_med, c_min, c_max, c_std = _stats(ccd)
+     s_med, s_min, s_max, s_std = _stats(setp)
+
+     return {
+         "ccd_med": c_med, "ccd_min": c_min, "ccd_max": c_max, "ccd_std": c_std, "ccd_n": n_ccd,
+         "set_med": s_med, "set_min": s_min, "set_max": s_max, "set_std": s_std, "set_n": n_set,
+         "n_files": len(file_list),
+     }
+
+ def _temp_to_stem_tag(temp_c: float, *, prefix: str = "") -> str:
+     """
+     Filename-safe temperature token:
+         -10.0 -> 'm10p0C'
+         +5.25 -> 'p5p3C' (rounded to 0.1C if you pass that in)
+     Uses:
+         m = minus, p = plus/decimal separator
+     Never produces '_-' which your _normalize_master_stem would collapse.
+     """
+     try:
+         t = float(temp_c)
+     except Exception:
+         return ""
+
+     sign = "m" if t < 0 else "p"
+     t_abs = abs(t)
+
+     # keep one decimal place (match your earlier plan)
+     s = f"{t_abs:.1f}"       # e.g. "10.0"
+     s = s.replace(".", "p")  # e.g. "10p0"
+     return f"{prefix}{sign}{s}C"

  class StackingSuiteDialog(QDialog):
      requestRelaunch = pyqtSignal(str, str)  # old_dir, new_dir
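The _temp_to_stem_tag helper added above encodes a Celsius value as a filename-safe token: m or p for the sign and p in place of the decimal point. For reference, a minimal standalone sketch of the same encoding outside the diff; the sample values are illustrative:

    def temp_to_stem_tag(temp_c, prefix=""):
        # Mirrors the hunk above: 'm' = minus, 'p' = plus, '.' -> 'p'.
        try:
            t = float(temp_c)
        except (TypeError, ValueError):
            return ""
        sign = "m" if t < 0 else "p"
        s = f"{abs(t):.1f}".replace(".", "p")  # one decimal place, e.g. "10p0"
        return f"{prefix}{sign}{s}C"

    # temp_to_stem_tag(-10.0)               -> "m10p0C"
    # temp_to_stem_tag(5.0)                 -> "p5p0C"
    # temp_to_stem_tag(-10.0, prefix="set") -> "setm10p0C"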
@@ -6659,6 +6728,22 @@ class StackingSuiteDialog(QDialog):

          return tab

+     def _bucket_temp(self, t: float | None, step: float = 3.0) -> float | None:
+         """Round to stable bucket. Example: -10.2 -> -10.0 when step=1.0"""
+         if t is None:
+             return None
+         try:
+             return round(float(t) / float(step)) * float(step)
+         except Exception:
+             return None
+
+     def _temp_label(self, t: float | None, step: float = 1.0) -> str:
+         if t is None:
+             return "Temp: Unknown"
+         # show fewer decimals if step is 1.0
+         return f"Temp: {t:+.0f}C" if step >= 1.0 else f"Temp: {t:+.1f}C"
+
+
      def _tree_for_type(self, t: str):
          t = (t or "").upper()
          if t == "LIGHT": return getattr(self, "light_tree", None)
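_bucket_temp snaps a reading to the nearest multiple of step so that readings differing only by sensor noise land in the same group, and _temp_label turns the bucket into the text shown in the tree. A small illustration of the rounding arithmetic, with values chosen for illustration:

    def bucket_temp(t, step=1.0):
        # round(t / step) * step snaps t to the nearest multiple of step
        return None if t is None else round(float(t) / float(step)) * float(step)

    # step=1.0:  -10.2 -> -10.0,  -9.6 -> -10.0,  -9.4 -> -9.0
    # step=3.0:  -10.2 ->  -9.0   (nearest multiple of 3)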
@@ -10485,24 +10570,85 @@ class StackingSuiteDialog(QDialog):
          keyword = self.settings.value("stacking/session_keyword", "Default", type=str)
          session_tag = self._session_from_manual_keyword(path, keyword) or "Default"

+         # --- Temperature (fast: header already loaded) ---
+         ccd_temp = header.get("CCD-TEMP", None)
+         set_temp = header.get("SET-TEMP", None)
+
+         def _to_float_temp(v):
+             try:
+                 if v is None:
+                     return None
+                 if isinstance(v, (int, float)):
+                     return float(v)
+                 s = str(v).strip()
+                 s = s.replace("°", "").replace("C", "").replace("c", "").strip()
+                 return float(s)
+             except Exception:
+                 return None
+
+         ccd_temp_f = _to_float_temp(ccd_temp)
+         set_temp_f = _to_float_temp(set_temp)
+         use_temp_f = ccd_temp_f if ccd_temp_f is not None else set_temp_f
+
+         # --- Common metadata string for leaf rows ---
+         meta_text = f"Size: {image_size} | Session: {session_tag}"
+         if use_temp_f is not None:
+             meta_text += f" | Temp: {use_temp_f:.1f}C"
+             if set_temp_f is not None:
+                 meta_text += f" (Set: {set_temp_f:.1f}C)"
+
          # --- Common metadata string for leaf rows ---
          meta_text = f"Size: {image_size} | Session: {session_tag}"

          # === DARKs ===
          if expected_type_u == "DARK":
-             key = f"{exposure_text} ({image_size})"
-             self.dark_files.setdefault(key, []).append(path)
+             # --- temperature for grouping (prefer CCD-TEMP else SET-TEMP) ---
+             ccd_t = _get_key_float(header, "CCD-TEMP")
+             set_t = _get_key_float(header, "SET-TEMP")
+             chosen_t = ccd_t if ccd_t is not None else set_t
+
+             temp_step = self.settings.value("stacking/temp_group_step", 1.0, type=float)
+             temp_bucket = self._bucket_temp(chosen_t, step=temp_step)
+             temp_label = self._temp_label(temp_bucket, step=temp_step)

-             exposure_item = self._dark_group_item.get(key)
+             # --- tree grouping: exposure/size -> temp bucket -> files ---
+             base_key = f"{exposure_text} ({image_size})"
+
+             # ensure caches exist
+             if not hasattr(self, "_dark_group_item") or self._dark_group_item is None:
+                 self._dark_group_item = {}
+             if not hasattr(self, "_dark_temp_item") or self._dark_temp_item is None:
+                 self._dark_temp_item = {}  # (base_key, temp_label) -> QTreeWidgetItem
+
+             # top-level exposure group
+             exposure_item = self._dark_group_item.get(base_key)
              if exposure_item is None:
-                 exposure_item = QTreeWidgetItem([key])
+                 exposure_item = QTreeWidgetItem([base_key, ""])
                  tree.addTopLevelItem(exposure_item)
-                 self._dark_group_item[key] = exposure_item
-
-             leaf = QTreeWidgetItem([os.path.basename(path), meta_text])
+                 self._dark_group_item[base_key] = exposure_item
+
+             # second-level temp group under that exposure group
+             temp_key = (base_key, temp_label)
+             temp_item = self._dark_temp_item.get(temp_key)
+             if temp_item is None:
+                 temp_item = QTreeWidgetItem([temp_label, ""])
+                 exposure_item.addChild(temp_item)
+                 self._dark_temp_item[temp_key] = temp_item
+
+             # --- store in dict for stacking ---
+             # Key includes session + temp bucket so create_master_dark can split properly.
+             # (We keep compatibility: your create_master_dark already handles tuple keys.)
+             composite_key = (base_key, session_tag, temp_bucket)
+             self.dark_files.setdefault(composite_key, []).append(path)
+
+             # --- leaf row ---
+             # Also add temp info to metadata text so user can see it per file
+             meta_text_dark = f"Size: {image_size} | Session: {session_tag} | {temp_label}"
+             leaf = QTreeWidgetItem([os.path.basename(path), meta_text_dark])
              leaf.setData(0, Qt.ItemDataRole.UserRole, path)
              leaf.setData(0, Qt.ItemDataRole.UserRole + 1, session_tag)
-             exposure_item.addChild(leaf)
+             leaf.setData(0, Qt.ItemDataRole.UserRole + 2, temp_bucket)  # handy later
+             temp_item.addChild(leaf)

          # === FLATs ===
          elif expected_type_u == "FLAT":
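With this change a dark frame is filed twice: once in the two-level tree (exposure/size group, then temperature-bucket group) and once in self.dark_files under a composite key. A rough sketch of what that dictionary can end up holding; the paths, sessions and temperatures below are invented for illustration:

    # Hypothetical contents of self.dark_files after scanning a few darks;
    # keys are (base_key, session_tag, temp_bucket) tuples as in the hunk above.
    dark_files = {
        ("300.0s (4144x2822)", "Default", -10.0): ["darks/D_0001.fits", "darks/D_0002.fits"],
        ("300.0s (4144x2822)", "Default", -5.0):  ["darks/D_0003.fits"],
        ("300.0s (4144x2822)", "NightTwo", None): ["darks/D_0004.fits"],  # no usable temp keyword
    }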
@@ -10664,14 +10810,39 @@ class StackingSuiteDialog(QDialog):
          exposure_tolerance = self.exposure_tolerance_spinbox.value()

          # -------------------------------------------------------------------------
-         # Group darks by (exposure +/- tolerance, image size string, session)
-         # self.dark_files can be either:
-         # legacy: exposure_key -> [paths]
-         # session: (exposure_key, session) -> [paths]
+         # Temp helpers
+         # -------------------------------------------------------------------------
+         def _bucket_temp(t: float | None, step: float = 3.0) -> float | None:
+             """Round temperature to a stable bucket (e.g. -10.2 -> -10.0 if step=1.0)."""
+             if t is None:
+                 return None
+             try:
+                 return round(float(t) / step) * step
+             except Exception:
+                 return None
+
+         def _read_temp_quick(path: str) -> tuple[float | None, float | None, float | None]:
+             """Fast temp read (CCD, SET, chosen). Uses fits.getheader(memmap=True)."""
+             try:
+                 hdr = fits.getheader(path, memmap=True)
+             except Exception:
+                 return None, None, None
+             ccd = _get_key_float(hdr, "CCD-TEMP")
+             st = _get_key_float(hdr, "SET-TEMP")
+             chosen = ccd if ccd is not None else st
+             return ccd, st, chosen
+
+         # -------------------------------------------------------------------------
+         # Group darks by (exposure +/- tolerance, image size, session, temp_bucket)
+         # TEMP_STEP is the rounding bucket (1.0C default)
          # -------------------------------------------------------------------------
-         dark_files_by_group: dict[tuple[float, str, str], list[str]] = {}  # (exp, size, session)->list
+         TEMP_STEP = self.settings.value("stacking/temp_group_step", 1.0, type=float)
+
+         dark_files_by_group: dict[tuple[float, str, str, float | None], list[str]] = {}  # (exp,size,session,temp)->list

          for key, file_list in (self.dark_files or {}).items():
+             # Support both legacy dark_files (key=str) and newer tuple keys.
+             # We DO NOT assume dark_files already contains temp in key — we re-bucket from headers anyway.
              if isinstance(key, tuple) and len(key) >= 2:
                  exposure_key = str(key[0])
                  session = str(key[1]) if str(key[1]).strip() else "Default"
@@ -10683,10 +10854,9 @@ class StackingSuiteDialog(QDialog):
                  exposure_time_str, image_size = exposure_key.split(" (", 1)
                  image_size = image_size.rstrip(")")
              except ValueError:
-                 # If some malformed key got in, skip safely
                  continue

-             if "Unknown" in exposure_time_str:
+             if "Unknown" in (exposure_time_str or ""):
                  exposure_time = 0.0
              else:
                  try:
@@ -10694,21 +10864,31 @@ class StackingSuiteDialog(QDialog):
                  except Exception:
                      exposure_time = 0.0

-             matched_group = None
-             for (existing_exposure, existing_size, existing_session) in list(dark_files_by_group.keys()):
-                 if (
-                     existing_session == session
-                     and existing_size == image_size
-                     and abs(existing_exposure - exposure_time) <= exposure_tolerance
-                 ):
-                     matched_group = (existing_exposure, existing_size, existing_session)
-                     break
+             # Split the incoming list by temp bucket so mixed temps do not merge.
+             bucketed: dict[float | None, list[str]] = {}
+             for p in (file_list or []):
+                 _, _, chosen = _read_temp_quick(p)
+                 tb = _bucket_temp(chosen, step=TEMP_STEP)
+                 bucketed.setdefault(tb, []).append(p)
+
+             # Apply exposure tolerance grouping PER temp bucket
+             for temp_bucket, paths_in_bucket in bucketed.items():
+                 matched_group = None
+                 for (existing_exposure, existing_size, existing_session, existing_temp) in list(dark_files_by_group.keys()):
+                     if (
+                         existing_session == session
+                         and existing_size == image_size
+                         and existing_temp == temp_bucket
+                         and abs(existing_exposure - exposure_time) <= exposure_tolerance
+                     ):
+                         matched_group = (existing_exposure, existing_size, existing_session, existing_temp)
+                         break

-             if matched_group is None:
-                 matched_group = (exposure_time, image_size, session)
-                 dark_files_by_group[matched_group] = []
+                 if matched_group is None:
+                     matched_group = (exposure_time, image_size, session, temp_bucket)
+                     dark_files_by_group[matched_group] = []

-             dark_files_by_group[matched_group].extend(file_list or [])
+                 dark_files_by_group[matched_group].extend(paths_in_bucket)

          master_dir = os.path.join(self.stacking_directory, "Master_Calibration_Files")
          os.makedirs(master_dir, exist_ok=True)
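The grouping loop above first splits each incoming file list by temperature bucket and only then applies the exposure tolerance, so frames that are close in exposure but far apart in temperature no longer merge into one master. A self-contained sketch of that two-stage grouping, simplified so temperatures are passed in directly instead of being read from FITS headers:

    from collections import defaultdict

    def group_darks(frames, exposure_tolerance=0.5, temp_step=1.0):
        """frames: iterable of (path, exposure_s, size_str, session, temp_c_or_None)."""
        groups = defaultdict(list)  # (exp, size, session, temp_bucket) -> [paths]
        for path, exp, size, session, temp in frames:
            bucket = None if temp is None else round(temp / temp_step) * temp_step
            # Reuse an existing key when size/session/bucket match and the
            # exposure is within tolerance; otherwise start a new group.
            for key in list(groups):
                g_exp, g_size, g_session, g_bucket = key
                if (g_size == size and g_session == session and g_bucket == bucket
                        and abs(g_exp - exp) <= exposure_tolerance):
                    groups[key].append(path)
                    break
            else:
                groups[(exp, size, session, bucket)].append(path)
        return dict(groups)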
@@ -10717,11 +10897,11 @@ class StackingSuiteDialog(QDialog):
          # Informative status about discovery
          # -------------------------------------------------------------------------
          try:
-             n_groups = sum(1 for _, v in dark_files_by_group.items() if len(v) >= 2)
+             n_groups_eligible = sum(1 for _, v in dark_files_by_group.items() if len(v) >= 2)
              total_files = sum(len(v) for v in dark_files_by_group.values())
              self.update_status(self.tr(
                  f"🔎 Discovered {len(dark_files_by_group)} grouped exposures "
-                 f"({n_groups} eligible to stack) — {total_files} files total."
+                 f"({n_groups_eligible} eligible to stack) — {total_files} files total."
              ))
          except Exception:
              pass
@@ -10731,12 +10911,12 @@ class StackingSuiteDialog(QDialog):
          # Pre-count tiles for progress bar (per-group safe chunk sizes)
          # -------------------------------------------------------------------------
          total_tiles = 0
-         group_shapes: dict[tuple[float, str, str], tuple[int, int, int, int, int]] = {}  # (exp,size,session)->(H,W,C,ch,cw)
+         group_shapes: dict[tuple[float, str, str, float | None], tuple[int, int, int, int, int]] = {}
          pref_chunk_h = self.chunk_height
          pref_chunk_w = self.chunk_width
          DTYPE = np.float32

-         for (exposure_time, image_size, session), file_list in dark_files_by_group.items():
+         for (exposure_time, image_size, session, temp_bucket), file_list in dark_files_by_group.items():
              if len(file_list) < 2:
                  continue

@@ -10754,7 +10934,8 @@ class StackingSuiteDialog(QDialog):
              except MemoryError:
                  chunk_h, chunk_w = pref_chunk_h, pref_chunk_w

-             group_shapes[(exposure_time, image_size, session)] = (H, W, C, chunk_h, chunk_w)
+             gk = (exposure_time, image_size, session, temp_bucket)
+             group_shapes[gk] = (H, W, C, chunk_h, chunk_w)
              total_tiles += _count_tiles(H, W, chunk_h, chunk_w)

          if total_tiles == 0:
@@ -10767,7 +10948,7 @@ class StackingSuiteDialog(QDialog):
              QApplication.processEvents()

          # -------------------------------------------------------------------------
-         # Local CPU reducers (unchanged)
+         # Local CPU reducers
          # -------------------------------------------------------------------------
          def _select_reducer(kind: str, N: int):
              if kind == "dark":
@@ -10811,10 +10992,10 @@ class StackingSuiteDialog(QDialog):
          # ---------------------------------------------------------------------
          # Per-group stacking loop
          # ---------------------------------------------------------------------
-         for (exposure_time, image_size, session), file_list in dark_files_by_group.items():
+         for (exposure_time, image_size, session, temp_bucket), file_list in dark_files_by_group.items():
              if len(file_list) < 2:
                  self.update_status(self.tr(
-                     f"⚠️ Skipping {exposure_time}s ({image_size}) [{session}] - Not enough frames to stack."
+                     f"⚠️ Skipping {exposure_time:g}s ({image_size}) [{session}] - Not enough frames to stack."
                  ))
                  QApplication.processEvents()
                  continue
@@ -10823,14 +11004,17 @@ class StackingSuiteDialog(QDialog):
                  self.update_status(self.tr("⛔ Master Dark creation cancelled."))
                  break

+             temp_txt = "Unknown" if temp_bucket is None else f"{float(temp_bucket):+.1f}C"
              self.update_status(self.tr(
-                 f"🟢 Processing {len(file_list)} darks for {exposure_time}s ({image_size}) in session '{session}'…"
+                 f"🟢 Processing {len(file_list)} darks for {exposure_time:g}s ({image_size}) "
+                 f"in session '{session}' at {temp_txt}…"
              ))
              QApplication.processEvents()

              # --- reference shape and per-group chunk size ---
-             if (exposure_time, image_size, session) in group_shapes:
-                 height, width, channels, chunk_height, chunk_width = group_shapes[(exposure_time, image_size, session)]
+             gk = (exposure_time, image_size, session, temp_bucket)
+             if gk in group_shapes:
+                 height, width, channels, chunk_height, chunk_width = group_shapes[gk]
              else:
                  ref_data, _, _, _ = load_image(file_list[0])
                  if ref_data is None:
@@ -10870,8 +11054,11 @@ class StackingSuiteDialog(QDialog):
                      QApplication.processEvents()
                      continue

-             # Include session to prevent collisions
-             memmap_path = os.path.join(master_dir, f"temp_dark_{session}_{exposure_time}_{image_size}.dat")
+             # Create temp memmap (stem-safe normalization)
+             tb_tag = "notemp" if temp_bucket is None else _temp_to_stem_tag(float(temp_bucket))
+             memmap_base = f"temp_dark_{session}_{exposure_time:g}s_{image_size}_{tb_tag}.dat"
+             memmap_base = self._normalize_master_stem(memmap_base)
+             memmap_path = os.path.join(master_dir, memmap_base)

              self.update_status(self.tr(
                  f"🗂️ Creating temp memmap: {os.path.basename(memmap_path)} "
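The temp memmap name now encodes the exposure, size and temperature tag, which keeps concurrently processed groups from colliding on disk. What the stem looks like for one hypothetical group before _normalize_master_stem is applied (that normalization step is part of the hunk; its exact behavior is not reproduced here):

    session, exposure_time, image_size = "Default", 300.0, "4144x2822"  # invented values
    tb_tag = "m10p0C"  # what _temp_to_stem_tag(-10.0) produces

    memmap_base = f"temp_dark_{session}_{exposure_time:g}s_{image_size}_{tb_tag}.dat"
    print(memmap_base)  # -> temp_dark_Default_300s_4144x2822_m10p0C.dat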
@@ -10883,6 +11070,7 @@ class StackingSuiteDialog(QDialog):

              tiles = _tile_grid(height, width, chunk_height, chunk_width)
              total_tiles_group = len(tiles)
+
              self.update_status(self.tr(
                  f"📦 {total_tiles_group} tiles to process for this group (chunk {chunk_height}×{chunk_width})."
              ))
@@ -10924,7 +11112,7 @@ class StackingSuiteDialog(QDialog):
                      fut = tp.submit(_read_tile_into, (buf1 if use0 else buf0), ny0, ny1, nx0, nx1)

                  pd.set_label(
-                     f"{int(exposure_time)}s ({image_size}) [{session}] — "
+                     f"{int(exposure_time)}s ({image_size}) [{session}] [{temp_txt}] — "
                      f"tile {t_idx}/{total_tiles_group} y:{y0}-{y1} x:{x0}-{x1}"
                  )

@@ -10954,6 +11142,7 @@ class StackingSuiteDialog(QDialog):

                  if tile_result.ndim == 2:
                      tile_result = tile_result[:, :, None]
+
                  expected_shape = (th, tw, channels)
                  if tile_result.shape != expected_shape:
                      if tile_result.shape[:2] == (th, tw):
@@ -10988,37 +11177,115 @@ class StackingSuiteDialog(QDialog):
                          pass
                  break

+             # -------------------------------------------------------------
+             # Materialize final memmap to ndarray for save
+             # -------------------------------------------------------------
              master_dark_data = np.asarray(final_stacked, dtype=np.float32)
-             del final_stacked
+             try:
+                 del final_stacked
+             except Exception:
+                 pass
              gc.collect()
+
              try:
                  os.remove(memmap_path)
              except Exception:
                  pass

-             # Include session in output name
-             master_dark_stem = f"MasterDark_{session}_{int(exposure_time)}s_{image_size}"
+             # -------------------------------------------------------------
+             # Collect temperature stats from input dark headers
+             # -------------------------------------------------------------
+             temp_info = {}
+             try:
+                 temp_info = _collect_temp_stats(file_list) or {}
+             except Exception:
+                 temp_info = {}
+
+             # -------------------------------------------------------------
+             # Build output filename (include session + exposure + size + temp bucket tag)
+             # -------------------------------------------------------------
+             temp_tag = ""
+             try:
+                 if temp_bucket is not None:
+                     temp_tag = "_" + _temp_to_stem_tag(float(temp_bucket))
+                 elif temp_info.get("ccd_med") is not None:
+                     temp_tag = "_" + _temp_to_stem_tag(float(temp_info["ccd_med"]))
+                 elif temp_info.get("set_med") is not None:
+                     temp_tag = "_" + _temp_to_stem_tag(float(temp_info["set_med"]), prefix="set")
+             except Exception:
+                 temp_tag = ""
+
+             master_dark_stem = f"MasterDark_{session}_{int(exposure_time)}s_{image_size}{temp_tag}"
+             master_dark_stem = self._normalize_master_stem(master_dark_stem)
              master_dark_path = self._build_out(master_dir, master_dark_stem, "fit")

+             # -------------------------------------------------------------
+             # Header
+             # -------------------------------------------------------------
              master_header = fits.Header()
              master_header["IMAGETYP"] = "DARK"
-             master_header["EXPTIME"] = (exposure_time, "User-specified or from grouping")
-             master_header["SESSION"] = (session, "User session tag")  # optional but useful
-             master_header["NAXIS"] = 3 if channels == 3 else 2
-             master_header["NAXIS1"] = master_dark_data.shape[1]
-             master_header["NAXIS2"] = master_dark_data.shape[0]
+             master_header["EXPTIME"] = (float(exposure_time), "Exposure time (s)")
+             master_header["SESSION"] = (str(session), "User session tag")
+             master_header["NCOMBINE"] = (int(N), "Number of darks combined")
+             master_header["NSTACK"] = (int(N), "Alias of NCOMBINE (SetiAstro)")
+
+             # Temperature provenance (only write keys that exist)
+             if temp_info.get("ccd_med") is not None:
+                 master_header["CCD-TEMP"] = (float(temp_info["ccd_med"]), "Median CCD temp of input darks (C)")
+             if temp_info.get("ccd_min") is not None:
+                 master_header["CCDTMIN"] = (float(temp_info["ccd_min"]), "Min CCD temp in input darks (C)")
+             if temp_info.get("ccd_max") is not None:
+                 master_header["CCDTMAX"] = (float(temp_info["ccd_max"]), "Max CCD temp in input darks (C)")
+             if temp_info.get("ccd_std") is not None:
+                 master_header["CCDTSTD"] = (float(temp_info["ccd_std"]), "Std CCD temp in input darks (C)")
+             if temp_info.get("ccd_n") is not None:
+                 master_header["CCDTN"] = (int(temp_info["ccd_n"]), "Count of frames with CCD-TEMP")
+
+             if temp_info.get("set_med") is not None:
+                 master_header["SET-TEMP"] = (float(temp_info["set_med"]), "Median setpoint temp of input darks (C)")
+             if temp_info.get("set_min") is not None:
+                 master_header["SETTMIN"] = (float(temp_info["set_min"]), "Min setpoint in input darks (C)")
+             if temp_info.get("set_max") is not None:
+                 master_header["SETTMAX"] = (float(temp_info["set_max"]), "Max setpoint in input darks (C)")
+             if temp_info.get("set_std") is not None:
+                 master_header["SETTSTD"] = (float(temp_info["set_std"]), "Std setpoint in input darks (C)")
+             if temp_info.get("set_n") is not None:
+                 master_header["SETTN"] = (int(temp_info["set_n"]), "Count of frames with SET-TEMP")
+
+             # Dimensions (save_image usually writes these, but keep your existing behavior)
+             master_header["NAXIS"] = 3 if channels == 3 else 2
+             master_header["NAXIS1"] = int(master_dark_data.shape[1])
+             master_header["NAXIS2"] = int(master_dark_data.shape[0])
              if channels == 3:
                  master_header["NAXIS3"] = 3

-             save_image(master_dark_data, master_dark_path, "fit", "32-bit floating point", master_header, is_mono=(channels == 1))
+             save_image(
+                 master_dark_data,
+                 master_dark_path,
+                 "fit",
+                 "32-bit floating point",
+                 master_header,
+                 is_mono=(channels == 1)
+             )

-             self.add_master_dark_to_tree(f"{exposure_time}s ({image_size}) [{session}]", master_dark_path)
+             # Tree label includes temp for visibility
+             tree_label = f"{exposure_time:g}s ({image_size}) [{session}]"
+             if temp_info.get("ccd_med") is not None:
+                 tree_label += f" [CCD {float(temp_info['ccd_med']):+.1f}C]"
+             elif temp_info.get("set_med") is not None:
+                 tree_label += f" [SET {float(temp_info['set_med']):+.1f}C]"
+             elif temp_bucket is not None:
+                 tree_label += f" [TEMP {float(temp_bucket):+.1f}C]"
+
+             self.add_master_dark_to_tree(tree_label, master_dark_path)
              self.update_status(self.tr(f"✅ Master Dark saved: {master_dark_path}"))
              QApplication.processEvents()

+             # Refresh assignments + persistence
              self.assign_best_master_files()
              self.save_master_paths_to_settings()

+         # Post pass refresh (unchanged behavior)
          self.assign_best_master_dark()
          self.update_override_dark_combo()
          self.assign_best_master_files()
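The replacement header block above records how many frames went into the master dark and the spread of sensor temperatures among them. A hedged astropy sketch of writing and reading back the same provenance keys; the key names and comments follow the hunk, while the temperature values are invented:

    from astropy.io import fits
    import numpy as np

    ccd_temps = np.array([-10.1, -9.9, -10.0], dtype=np.float32)  # invented sample

    hdr = fits.Header()
    hdr["IMAGETYP"] = "DARK"
    hdr["NCOMBINE"] = (len(ccd_temps), "Number of darks combined")
    hdr["NSTACK"] = (len(ccd_temps), "Alias of NCOMBINE (SetiAstro)")
    hdr["CCD-TEMP"] = (float(np.median(ccd_temps)), "Median CCD temp of input darks (C)")
    hdr["CCDTMIN"] = (float(ccd_temps.min()), "Min CCD temp in input darks (C)")
    hdr["CCDTMAX"] = (float(ccd_temps.max()), "Max CCD temp in input darks (C)")
    hdr["CCDTSTD"] = (float(ccd_temps.std()), "Std CCD temp in input darks (C)")

    # Reading the provenance back later is a plain keyword lookup:
    print(hdr["CCD-TEMP"], hdr.comments["CCD-TEMP"])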
@@ -11031,7 +11298,6 @@ class StackingSuiteDialog(QDialog):
                  logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
          pd.close()

-
      def add_master_dark_to_tree(self, exposure_label: str, master_dark_path: str):
          """
          Adds the newly created Master Dark to the Master Dark TreeBox and updates the dropdown.
@@ -12079,6 +12345,140 @@ class StackingSuiteDialog(QDialog):
              master_item = QTreeWidgetItem([os.path.basename(master_flat_path)])
              filter_item.addChild(master_item)

+     def _parse_float(self, v):
+         try:
+             if v is None:
+                 return None
+             if isinstance(v, (int, float)):
+                 return float(v)
+             s = str(v).strip()
+             # handle " -10.0 C" or "-10.0C"
+             s = s.replace("°", "").replace("C", "").replace("c", "").strip()
+             return float(s)
+         except Exception:
+             return None
+
+
+     def _read_ccd_set_temp_from_fits(self, path: str) -> tuple[float|None, float|None]:
+         """Read CCD-TEMP and SET-TEMP from FITS header (primary HDU)."""
+         try:
+             with fits.open(path) as hdul:
+                 hdr = hdul[0].header
+                 ccd = self._parse_float(hdr.get("CCD-TEMP", None))
+                 st = self._parse_float(hdr.get("SET-TEMP", None))
+                 return ccd, st
+         except Exception:
+             return None, None
+
+
+     def _temp_for_matching(self, ccd: float|None, st: float|None) -> float|None:
+         """Prefer CCD-TEMP; else SET-TEMP; else None."""
+         return ccd if ccd is not None else (st if st is not None else None)
+
+
+     def _parse_masterdark_name(self, stem: str):
+         """
+         From filename like:
+             MasterDark_Session_300s_4144x2822_m10p0C.fit
+         Return dict fields; temp is optional.
+         """
+         out = {"session": None, "exp": None, "size": None, "temp": None}
+
+         base = os.path.basename(stem)
+         base = os.path.splitext(base)[0]
+
+         # session is between MasterDark_ and _<exp>s_
+         # exp is <num>s
+         # size is <WxH> like 4144x2822
+         m = re.match(r"^MasterDark_(?P<session>.+?)_(?P<exp>[\d._]+)s_(?P<size>\d+x\d+)(?:_(?P<temp>.*))?$", base)
+         if not m:
+             return out
+
+         out["session"] = (m.group("session") or "").strip()
+         # exp might be "2_5" from _normalize_master_stem; convert back
+         exp_txt = (m.group("exp") or "").replace("_", ".")
+         try:
+             out["exp"] = float(exp_txt)
+         except Exception:
+             out["exp"] = None
+
+         out["size"] = m.group("size")
+
+         # temp token like m10p0C / p5p0C / setm10p0C
+         t = (m.group("temp") or "").strip()
+         if t:
+             # pick the first temp-ish token ending in C
+             mt = re.search(r"(set)?([mp])(\d+)p(\d)C", t)
+             if mt:
+                 sign = -1.0 if mt.group(2) == "m" else 1.0
+                 whole = float(mt.group(3))
+                 frac = float(mt.group(4)) / 10.0
+                 out["temp"] = sign * (whole + frac)
+
+         return out
+
+
+     def _get_master_dark_meta(self, path: str) -> dict:
+         """
+         Cached metadata for a master dark.
+         Prefers FITS header for temp; falls back to filename temp token.
+         """
+         if not hasattr(self, "_master_dark_meta_cache"):
+             self._master_dark_meta_cache = {}
+         cache = self._master_dark_meta_cache
+
+         p = os.path.normpath(path)
+         if p in cache:
+             return cache[p]
+
+         meta = {"path": p, "session": None, "exp": None, "size": None,
+                 "ccd": None, "set": None, "temp": None}
+
+         # filename parse (fast)
+         fn = self._parse_masterdark_name(p)
+         meta["session"] = fn.get("session") or None
+         meta["exp"] = fn.get("exp")
+         meta["size"] = fn.get("size")
+         meta["temp"] = fn.get("temp")
+
+         # header parse (authoritative for temps)
+         ccd, st = self._read_ccd_set_temp_from_fits(p)
+         meta["ccd"] = ccd
+         meta["set"] = st
+         meta["temp"] = self._temp_for_matching(ccd, st) if (ccd is not None or st is not None) else meta["temp"]
+
+         # size from header if missing
+         if not meta["size"]:
+             try:
+                 with fits.open(p) as hdul:
+                     data = hdul[0].data
+                     if data is not None:
+                         meta["size"] = f"{data.shape[1]}x{data.shape[0]}"
+             except Exception:
+                 pass
+
+         cache[p] = meta
+         return meta
+
+
+     def _get_light_temp(self, light_path: str) -> tuple[float|None, float|None, float|None]:
+         """Return (ccd, set, chosen) with caching."""
+         if not hasattr(self, "_light_temp_cache"):
+             self._light_temp_cache = {}
+         cache = self._light_temp_cache
+
+         p = os.path.normpath(light_path or "")
+         if not p:
+             return None, None, None
+         if p in cache:
+             return cache[p]
+
+         ccd, st = self._read_ccd_set_temp_from_fits(p)
+         chosen = self._temp_for_matching(ccd, st)
+         cache[p] = (ccd, st, chosen)
+         return cache[p]
+
+
      def assign_best_master_files(self, fill_only: bool = True):
          """
          Assign best matching Master Dark and Flat to each Light leaf.
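The new _parse_masterdark_name recovers the session, exposure, frame size and optional temperature token from a master-dark filename, so temperature matching can still work when a header cannot be read. A small round-trip check using the same regular expressions as the hunk; the filename stem is made up:

    import re

    stem = "MasterDark_Default_300s_4144x2822_m10p0C"  # hypothetical filename stem

    m = re.match(
        r"^MasterDark_(?P<session>.+?)_(?P<exp>[\d._]+)s_(?P<size>\d+x\d+)(?:_(?P<temp>.*))?$",
        stem,
    )
    assert m is not None

    mt = re.search(r"(set)?([mp])(\d+)p(\d)C", m.group("temp") or "")
    if mt:
        sign = -1.0 if mt.group(2) == "m" else 1.0
        temp = sign * (float(mt.group(3)) + float(mt.group(4)) / 10.0)
        print(m.group("session"), m.group("exp"), m.group("size"), temp)
        # -> Default 300 4144x2822 -10.0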
@@ -12138,32 +12538,57 @@ class StackingSuiteDialog(QDialog):
              if fill_only and curr_dark and curr_dark.lower() != "none":
                  dark_choice = curr_dark
              else:
-                 # 3) Auto-pick by size+closest exposure
-                 best_dark_match = None
-                 best_dark_diff = float("inf")
-                 for master_key, master_path in self.master_files.items():
-                     dmatch = re.match(r"^([\d.]+)s\b", master_key)  # darks start with "<exp>s"
-                     if not dmatch:
+                 # 3) Auto-pick by size + closest exposure + closest temperature (and prefer same session)
+                 light_path = leaf_item.data(0, Qt.ItemDataRole.UserRole)
+                 l_ccd, l_set, l_temp = self._get_light_temp(light_path)
+
+                 best_path = None
+                 best_score = None
+
+                 for mk, mp in (self.master_files or {}).items():
+                     if not mp:
                          continue
-                     master_dark_exposure_time = float(dmatch.group(1))

-                     # Ensure size known/cached
-                     md_size = master_sizes.get(master_path)
-                     if not md_size:
-                         try:
-                             with fits.open(master_path) as hdul:
-                                 md_size = f"{hdul[0].data.shape[1]}x{hdul[0].data.shape[0]}"
-                         except Exception:
-                             md_size = "Unknown"
-                         master_sizes[master_path] = md_size
+                     bn = os.path.basename(mp)
+                     # Only consider MasterDark_* files (cheap gate)
+                     if not bn.startswith("MasterDark_"):
+                         continue
+
+                     md = self._get_master_dark_meta(mp)
+                     md_size = md.get("size") or "Unknown"
+                     if md_size != image_size:
+                         continue
+
+                     md_exp = md.get("exp")
+                     if md_exp is None:
+                         continue
+
+                     # exposure closeness
+                     exp_diff = abs(float(md_exp) - float(exposure_time))
+
+                     # session preference: exact match beats mismatch
+                     md_sess = (md.get("session") or "Default").strip()
+                     sess_mismatch = 0 if md_sess == session_name else 1
+
+                     # temperature closeness (if both known)
+                     md_temp = md.get("temp")
+                     if (l_temp is not None) and (md_temp is not None):
+                         temp_diff = abs(float(md_temp) - float(l_temp))
+                         temp_unknown = 0
+                     else:
+                         # if light has temp but dark doesn't (or vice versa), penalize
+                         temp_diff = 9999.0
+                         temp_unknown = 1
+
+                     # Score tuple: lower is better
+                     # Priority: session match -> exposure diff -> temp availability -> temp diff
+                     score = (sess_mismatch, exp_diff, temp_unknown, temp_diff)

-                     if md_size == image_size:
-                         diff = abs(master_dark_exposure_time - exposure_time)
-                         if diff < best_dark_diff:
-                             best_dark_diff = diff
-                             best_dark_match = master_path
+                     if best_score is None or score < best_score:
+                         best_score = score
+                         best_path = mp

-                 dark_choice = os.path.basename(best_dark_match) if best_dark_match else ("None" if not curr_dark else curr_dark)
+                 dark_choice = os.path.basename(best_path) if best_path else ("None" if not curr_dark else curr_dark)

              # ---------- FLAT RESOLUTION ----------
              flat_key_full = f"{filter_name_raw} - {exposure_text}"
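The auto-pick now ranks every candidate master dark by the tuple (sess_mismatch, exp_diff, temp_unknown, temp_diff) and keeps the smallest one, relying on Python's lexicographic tuple comparison: session match dominates, then exposure closeness, then whether a temperature is known at all, then temperature closeness. A tiny illustration with invented candidates:

    # Each candidate: (name, sess_mismatch, exp_diff, temp_unknown, temp_diff)
    candidates = [
        ("dark_A", 0, 0.0, 1, 9999.0),  # same session, exact exposure, no temperature info
        ("dark_B", 0, 0.0, 0, 4.0),     # same session, exact exposure, temperature 4C away
        ("dark_C", 1, 0.0, 0, 0.1),     # other session, exact exposure, nearly exact temperature
    ]

    best = min(candidates, key=lambda c: c[1:])
    print(best[0])  # -> dark_B: tuples compare element by element, left to right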
@@ -16328,6 +16753,10 @@ class StackingSuiteDialog(QDialog):
          hdr_orig["CREATOR"] = "SetiAstroSuite"
          hdr_orig["DATE-OBS"] = datetime.utcnow().isoformat()

+         n_frames_group = len(file_list)
+         hdr_orig["NCOMBINE"] = (int(n_frames_group), "Number of frames combined")
+         hdr_orig["NSTACK"] = (int(n_frames_group), "Alias of NCOMBINE (SetiAstro)")
+
          is_mono_orig = (integrated_image.ndim == 2)
          if is_mono_orig:
              hdr_orig["NAXIS"] = 2
@@ -16447,6 +16876,8 @@ class StackingSuiteDialog(QDialog):
              scale=1.0,
              rect_override=group_rect if group_rect is not None else global_rect
          )
+         hdr_crop["NCOMBINE"] = (int(n_frames_group), "Number of frames combined")
+         hdr_crop["NSTACK"] = (int(n_frames_group), "Alias of NCOMBINE (SetiAstro)")
          is_mono_crop = (cropped_img.ndim == 2)
          Hc, Wc = (cropped_img.shape[:2] if cropped_img.ndim >= 2 else (H, W))
          display_group_crop = self._label_with_dims(group_key, Wc, Hc)
@@ -16590,6 +17021,12 @@ class StackingSuiteDialog(QDialog):
              algo_override=COMET_ALGO  # << comet-friendly reducer
          )

+         n_usable = int(len(usable))
+         ref_header_c = ref_header_c or ref_header or fits.Header()
+         ref_header_c["NCOMBINE"] = (n_usable, "Number of frames combined (comet)")
+         ref_header_c["NSTACK"] = (n_usable, "Alias of NCOMBINE (SetiAstro)")
+         ref_header_c["COMETFR"] = (n_usable, "Frames used for comet-aligned stack")
+
          # Save CometOnly
          Hc, Wc = comet_only.shape[:2]
          display_group_c = self._label_with_dims(group_key, Wc, Hc)
@@ -16614,6 +17051,10 @@ class StackingSuiteDialog(QDialog):
              scale=1.0,
              rect_override=group_rect if group_rect is not None else global_rect
          )
+         comet_only_crop, hdr_c_crop = self._apply_autocrop(...)
+         hdr_c_crop["NCOMBINE"] = (n_usable, "Number of frames combined (comet)")
+         hdr_c_crop["NSTACK"] = (n_usable, "Alias of NCOMBINE (SetiAstro)")
+         hdr_c_crop["COMETFR"] = (n_usable, "Frames used for comet-aligned stack")
          Hcc, Wcc = comet_only_crop.shape[:2]
          display_group_cc = self._label_with_dims(group_key, Wcc, Hcc)
          comet_path_crop = self._build_out(
@@ -17201,246 +17642,6 @@ class StackingSuiteDialog(QDialog):
              views[p] = np.load(npy, mmap_mode="r")  # returns numpy.memmap
          return views

-
-     def stack_registered_images_chunked(
-         self,
-         grouped_files,
-         frame_weights,
-         chunk_height=2048,
-         chunk_width=2048
-     ):
-         self.update_status(self.tr(f"✅ Chunked stacking {len(grouped_files)} group(s)..."))
-         QApplication.processEvents()
-
-         all_rejection_coords = []
-
-         for group_key, file_list in grouped_files.items():
-             num_files = len(file_list)
-             self.update_status(self.tr(f"📊 Group '{group_key}' has {num_files} aligned file(s)."))
-             QApplication.processEvents()
-             if num_files < 2:
-                 self.update_status(self.tr(f"⚠️ Group '{group_key}' does not have enough frames to stack."))
-                 continue
-
-             # Reference shape/header (unchanged)
-             ref_file = file_list[0]
-             if not os.path.exists(ref_file):
-                 self.update_status(self.tr(f"⚠️ Reference file '{ref_file}' not found, skipping group."))
-                 continue
-
-             ref_data, ref_header, _, _ = load_image(ref_file)
-             if ref_data is None:
-                 self.update_status(self.tr(f"⚠️ Could not load reference '{ref_file}', skipping group."))
-                 continue
-
-             is_color = (ref_data.ndim == 3 and ref_data.shape[2] == 3)
-             height, width = ref_data.shape[:2]
-             channels = 3 if is_color else 1
-
-             # Final output memmap (unchanged)
-             memmap_path = self._build_out(self.stacking_directory, f"chunked_{group_key}", "dat")
-             final_stacked = np.memmap(memmap_path, dtype=np.float32, mode='w+', shape=(height, width, channels))
-
-             # Valid files + weights
-             aligned_paths, weights_list = [], []
-             for fpath in file_list:
-                 if os.path.exists(fpath):
-                     aligned_paths.append(fpath)
-                     weights_list.append(frame_weights.get(fpath, 1.0))
-                 else:
-                     self.update_status(self.tr(f"⚠️ File not found: {fpath}, skipping."))
-             if len(aligned_paths) < 2:
-                 self.update_status(self.tr(f"⚠️ Not enough valid frames in group '{group_key}' to stack."))
-                 continue
-
-             weights_list = np.array(weights_list, dtype=np.float32)
-
-             # ⬇️ NEW: open read-only memmaps for all aligned frames (float32 [0..1], HxWxC)
-             mm_views = self._open_memmaps_readonly(aligned_paths)
-
-             self.update_status(self.tr(f"📊 Stacking group '{group_key}' with {self.rejection_algorithm}"))
-             QApplication.processEvents()
-
-             rejection_coords = []
-             N = len(aligned_paths)
-             DTYPE = self._dtype()
-             pref_h = self.chunk_height
-             pref_w = self.chunk_width
-
-             try:
-                 chunk_h, chunk_w = compute_safe_chunk(height, width, N, channels, DTYPE, pref_h, pref_w)
-                 self.update_status(self.tr(f"🔧 Using chunk size {chunk_h}×{chunk_w} for {self._dtype()}"))
-             except MemoryError as e:
-                 self.update_status(self.tr(f"⚠️ {e}"))
-                 return None, {}, None
-
-             # Tile loop (same structure, but tile loading reads from memmaps)
-             from concurrent.futures import ThreadPoolExecutor, as_completed
-             LOADER_WORKERS = min(max(2, (os.cpu_count() or 4) // 2), 8)  # tuned for memory bw
-
-             for y_start in range(0, height, chunk_h):
-                 y_end = min(y_start + chunk_h, height)
-                 tile_h = y_end - y_start
-
-                 for x_start in range(0, width, chunk_w):
-                     x_end = min(x_start + chunk_w, width)
-                     tile_w = x_end - x_start
-
-                     # Preallocate tile stack
-                     tile_stack = np.empty((N, tile_h, tile_w, channels), dtype=np.float32)
-
-                     # ⬇️ NEW: fill tile_stack from the memmaps (parallel copy)
-                     def _copy_one(i, path):
-                         v = mm_views[path][y_start:y_end, x_start:x_end]  # view on disk
-                         if v.ndim == 2:
-                             # mono memmap stored as (H,W,1); but if legacy mono npy exists as (H,W),
-                             # make it (H,W,1) here:
-                             vv = v[..., None]
-                         else:
-                             vv = v
-                         if vv.shape[2] == 1 and channels == 3:
-                             vv = np.repeat(vv, 3, axis=2)
-                         tile_stack[i] = vv
-
-                     with ThreadPoolExecutor(max_workers=LOADER_WORKERS) as exe:
-                         futs = {exe.submit(_copy_one, i, p): i for i, p in enumerate(aligned_paths)}
-                         for _ in as_completed(futs):
-                             pass
-
-                     # Rejection (unchanged – uses your Numba kernels)
-                     algo = self.rejection_algorithm
-                     if algo == "Simple Median (No Rejection)":
-                         tile_result = np.median(tile_stack, axis=0)
-                         tile_rej_map = np.zeros(tile_stack.shape[1:3], dtype=np.bool_)
-                     elif algo == "Simple Average (No Rejection)":
-                         tile_result = np.average(tile_stack, axis=0, weights=weights_list)
-                         tile_rej_map = np.zeros(tile_stack.shape[1:3], dtype=np.bool_)
-                     elif algo == "Weighted Windsorized Sigma Clipping":
-                         tile_result, tile_rej_map = windsorized_sigma_clip_weighted(
-                             tile_stack, weights_list, lower=self.sigma_low, upper=self.sigma_high
-                         )
-                     elif algo == "Kappa-Sigma Clipping":
-                         tile_result, tile_rej_map = kappa_sigma_clip_weighted(
-                             tile_stack, weights_list, kappa=self.kappa, iterations=self.iterations
-                         )
-                     elif algo == "Trimmed Mean":
-                         tile_result, tile_rej_map = trimmed_mean_weighted(
-                             tile_stack, weights_list, trim_fraction=self.trim_fraction
-                         )
-                     elif algo == "Extreme Studentized Deviate (ESD)":
-                         tile_result, tile_rej_map = esd_clip_weighted(
-                             tile_stack, weights_list, threshold=self.esd_threshold
-                         )
-                     elif algo == "Biweight Estimator":
-                         tile_result, tile_rej_map = biweight_location_weighted(
-                             tile_stack, weights_list, tuning_constant=self.biweight_constant
-                         )
-                     elif algo == "Modified Z-Score Clipping":
-                         tile_result, tile_rej_map = modified_zscore_clip_weighted(
-                             tile_stack, weights_list, threshold=self.modz_threshold
-                         )
-                     elif algo == "Max Value":
-                         tile_result, tile_rej_map = max_value_stack(
-                             tile_stack, weights_list
-                         )
-                     else:
-                         tile_result, tile_rej_map = windsorized_sigma_clip_weighted(
-                             tile_stack, weights_list, lower=self.sigma_low, upper=self.sigma_high
-                         )
-
-                     # Ensure tile_result has correct shape
-                     if tile_result.ndim == 2:
-                         tile_result = tile_result[:, :, None]
-                     expected_shape = (tile_h, tile_w, channels)
-                     if tile_result.shape != expected_shape:
-                         if tile_result.shape[2] == 0:
-                             tile_result = np.zeros(expected_shape, dtype=np.float32)
-                         elif tile_result.shape[:2] == (tile_h, tile_w):
-                             if tile_result.shape[2] > channels:
-                                 tile_result = tile_result[:, :, :channels]
-                             else:
-                                 tile_result = np.repeat(tile_result, channels, axis=2)[:, :, :channels]
-
-                     # Commit tile
-                     final_stacked[y_start:y_end, x_start:x_end, :] = tile_result
-
-                     # Collect per-tile rejection coords (unchanged logic)
-                     if tile_rej_map.ndim == 3:    # (N, tile_h, tile_w)
-                         combined_rej = np.any(tile_rej_map, axis=0)
-                     elif tile_rej_map.ndim == 4:  # (N, tile_h, tile_w, C)
-                         combined_rej = np.any(tile_rej_map, axis=0)
-                         combined_rej = np.any(combined_rej, axis=-1)
-                     else:
-                         combined_rej = np.zeros((tile_h, tile_w), dtype=np.bool_)
-
-                     ys_tile, xs_tile = np.where(combined_rej)
-                     for dy, dx in zip(ys_tile, xs_tile):
-                         rejection_coords.append((x_start + dx, y_start + dy))
-
-             # Finish/save (unchanged from your version) …
-             final_array = np.array(final_stacked)
-             del final_stacked
-
-             final_array = self._normalize_stack_01(final_array)
-
-             if final_array.ndim == 3 and final_array.shape[-1] == 1:
-                 final_array = final_array[..., 0]
-             is_mono = (final_array.ndim == 2)
-
-             if ref_header is None:
-                 ref_header = fits.Header()
-             ref_header["IMAGETYP"] = "MASTER STACK"
-             ref_header["BITPIX"] = -32
-             ref_header["STACKED"] = (True, "Stacked using chunked approach")
-             ref_header["CREATOR"] = "SetiAstroSuite"
-             ref_header["DATE-OBS"] = datetime.utcnow().isoformat()
-             if is_mono:
-                 ref_header["NAXIS"] = 2
-                 ref_header["NAXIS1"] = final_array.shape[1]
-                 ref_header["NAXIS2"] = final_array.shape[0]
-                 if "NAXIS3" in ref_header: del ref_header["NAXIS3"]
-             else:
-                 ref_header["NAXIS"] = 3
-                 ref_header["NAXIS1"] = final_array.shape[1]
-                 ref_header["NAXIS2"] = final_array.shape[0]
-                 ref_header["NAXIS3"] = 3
-
-             output_stem = f"MasterLight_{group_key}_{len(aligned_paths)}stacked"
-             output_path = self._build_out(self.stacking_directory, output_stem, "fit")
-
-             save_image(
-                 img_array=final_array,
-                 filename=output_path,
-                 original_format="fit",
-                 bit_depth="32-bit floating point",
-                 original_header=ref_header,
-                 is_mono=is_mono
-             )
-
-             self.update_status(self.tr(f"✅ Group '{group_key}' stacked {len(aligned_paths)} frame(s)! Saved: {output_path}"))
-
-             print(f"✅ Master Light saved for group '{group_key}': {output_path}")
-
-             # Optionally, you might want to store or log 'rejection_coords' (here appended to all_rejection_coords)
-             all_rejection_coords.extend(rejection_coords)
-
-             # Clean up memmap file
-             try:
-                 os.remove(memmap_path)
-             except OSError:
-                 pass
-
-         QMessageBox.information(
-             self,
-             "Stacking Complete",
-             f"All stacking finished successfully.\n"
-             f"Frames per group:\n" +
-             "\n".join([f"{group_key}: {len(files)} frame(s)" for group_key, files in grouped_files.items()])
-         )
-
-         # Optionally, you could return the global rejection coordinate list.
-         return all_rejection_coords
-
      def _start_after_align_worker(self, aligned_light_files: dict[str, list[str]]):
          # Snapshot UI settings
          if getattr(self, "_suppress_normal_integration_once", False):
@@ -18455,6 +18656,10 @@ class StackingSuiteDialog(QDialog):
          hdr_orig["CREATOR"] = "SetiAstroSuite"
          hdr_orig["DATE-OBS"] = datetime.utcnow().isoformat()

+         n_frames = int(len(file_list))
+         hdr_orig["NCOMBINE"] = (n_frames, "Number of frames combined")
+         hdr_orig["NSTACK"] = (n_frames, "Alias of NCOMBINE (SetiAstro)")
+
          if final_drizzle.ndim == 2:
              hdr_orig["NAXIS"] = 2
              hdr_orig["NAXIS1"] = final_drizzle.shape[1]
@@ -18484,10 +18689,12 @@ class StackingSuiteDialog(QDialog):
          cropped_drizzle, hdr_crop = self._apply_autocrop(
              final_drizzle,
              file_list,
-             hdr.copy() if hdr is not None else fits.Header(),
+             hdr_orig.copy(),
              scale=float(scale_factor),
              rect_override=rect_override
          )
+         hdr_crop["NCOMBINE"] = (n_frames, "Number of frames combined")
+         hdr_crop["NSTACK"] = (n_frames, "Alias of NCOMBINE (SetiAstro)")
          is_mono_crop = (cropped_drizzle.ndim == 2)
          display_group_driz_crop = self._label_with_dims(group_key, cropped_drizzle.shape[1], cropped_drizzle.shape[0])
          base_crop = f"MasterLight_{display_group_driz_crop}_{len(file_list)}stacked_drizzle_autocrop"