setiastrosuitepro 1.6.1.post1__py3-none-any.whl → 1.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- setiastro/images/Background_startup.jpg +0 -0
- setiastro/qml/ResourceMonitor.qml +126 -0
- setiastro/saspro/__main__.py +162 -25
- setiastro/saspro/_generated/build_info.py +2 -1
- setiastro/saspro/abe.py +62 -11
- setiastro/saspro/aberration_ai.py +3 -3
- setiastro/saspro/add_stars.py +5 -2
- setiastro/saspro/astrobin_exporter.py +3 -0
- setiastro/saspro/astrospike_python.py +3 -1
- setiastro/saspro/autostretch.py +4 -2
- setiastro/saspro/backgroundneutral.py +52 -10
- setiastro/saspro/batch_convert.py +3 -0
- setiastro/saspro/batch_renamer.py +3 -0
- setiastro/saspro/blemish_blaster.py +3 -0
- setiastro/saspro/cheat_sheet.py +50 -15
- setiastro/saspro/clahe.py +27 -1
- setiastro/saspro/comet_stacking.py +103 -38
- setiastro/saspro/convo.py +3 -0
- setiastro/saspro/copyastro.py +3 -0
- setiastro/saspro/cosmicclarity.py +70 -45
- setiastro/saspro/crop_dialog_pro.py +17 -0
- setiastro/saspro/curve_editor_pro.py +18 -0
- setiastro/saspro/debayer.py +3 -0
- setiastro/saspro/doc_manager.py +39 -16
- setiastro/saspro/fitsmodifier.py +3 -0
- setiastro/saspro/frequency_separation.py +8 -2
- setiastro/saspro/function_bundle.py +2 -0
- setiastro/saspro/generate_translations.py +715 -1
- setiastro/saspro/ghs_dialog_pro.py +3 -0
- setiastro/saspro/graxpert.py +3 -0
- setiastro/saspro/gui/main_window.py +272 -29
- setiastro/saspro/gui/mixins/dock_mixin.py +100 -1
- setiastro/saspro/gui/mixins/file_mixin.py +7 -0
- setiastro/saspro/gui/mixins/menu_mixin.py +28 -0
- setiastro/saspro/gui/statistics_dialog.py +47 -0
- setiastro/saspro/halobgon.py +29 -3
- setiastro/saspro/histogram.py +3 -0
- setiastro/saspro/history_explorer.py +2 -0
- setiastro/saspro/i18n.py +22 -10
- setiastro/saspro/image_combine.py +3 -0
- setiastro/saspro/image_peeker_pro.py +3 -0
- setiastro/saspro/imageops/stretch.py +5 -13
- setiastro/saspro/isophote.py +3 -0
- setiastro/saspro/legacy/numba_utils.py +64 -47
- setiastro/saspro/linear_fit.py +3 -0
- setiastro/saspro/live_stacking.py +13 -2
- setiastro/saspro/mask_creation.py +3 -0
- setiastro/saspro/mfdeconv.py +5 -0
- setiastro/saspro/morphology.py +30 -5
- setiastro/saspro/multiscale_decomp.py +3 -0
- setiastro/saspro/nbtorgb_stars.py +12 -2
- setiastro/saspro/numba_utils.py +148 -47
- setiastro/saspro/ops/scripts.py +77 -17
- setiastro/saspro/ops/settings.py +1 -43
- setiastro/saspro/perfect_palette_picker.py +1 -0
- setiastro/saspro/pixelmath.py +6 -2
- setiastro/saspro/plate_solver.py +1 -0
- setiastro/saspro/remove_green.py +18 -1
- setiastro/saspro/remove_stars.py +136 -162
- setiastro/saspro/resources.py +7 -0
- setiastro/saspro/rgb_combination.py +1 -0
- setiastro/saspro/rgbalign.py +4 -4
- setiastro/saspro/save_options.py +1 -0
- setiastro/saspro/sfcc.py +50 -8
- setiastro/saspro/signature_insert.py +3 -0
- setiastro/saspro/stacking_suite.py +630 -341
- setiastro/saspro/star_alignment.py +16 -1
- setiastro/saspro/star_spikes.py +116 -32
- setiastro/saspro/star_stretch.py +38 -1
- setiastro/saspro/stat_stretch.py +35 -3
- setiastro/saspro/subwindow.py +63 -2
- setiastro/saspro/supernovaasteroidhunter.py +3 -0
- setiastro/saspro/translations/all_source_strings.json +3654 -0
- setiastro/saspro/translations/ar_translations.py +3865 -0
- setiastro/saspro/translations/de_translations.py +16 -0
- setiastro/saspro/translations/es_translations.py +16 -0
- setiastro/saspro/translations/fr_translations.py +16 -0
- setiastro/saspro/translations/hi_translations.py +3571 -0
- setiastro/saspro/translations/integrate_translations.py +36 -0
- setiastro/saspro/translations/it_translations.py +16 -0
- setiastro/saspro/translations/ja_translations.py +16 -0
- setiastro/saspro/translations/pt_translations.py +16 -0
- setiastro/saspro/translations/ru_translations.py +2848 -0
- setiastro/saspro/translations/saspro_ar.qm +0 -0
- setiastro/saspro/translations/saspro_ar.ts +255 -0
- setiastro/saspro/translations/saspro_de.qm +0 -0
- setiastro/saspro/translations/saspro_de.ts +3 -3
- setiastro/saspro/translations/saspro_es.qm +0 -0
- setiastro/saspro/translations/saspro_es.ts +3 -3
- setiastro/saspro/translations/saspro_fr.qm +0 -0
- setiastro/saspro/translations/saspro_fr.ts +3 -3
- setiastro/saspro/translations/saspro_hi.qm +0 -0
- setiastro/saspro/translations/saspro_hi.ts +257 -0
- setiastro/saspro/translations/saspro_it.qm +0 -0
- setiastro/saspro/translations/saspro_it.ts +3 -3
- setiastro/saspro/translations/saspro_ja.qm +0 -0
- setiastro/saspro/translations/saspro_ja.ts +4 -4
- setiastro/saspro/translations/saspro_pt.qm +0 -0
- setiastro/saspro/translations/saspro_pt.ts +3 -3
- setiastro/saspro/translations/saspro_ru.qm +0 -0
- setiastro/saspro/translations/saspro_ru.ts +237 -0
- setiastro/saspro/translations/saspro_sw.qm +0 -0
- setiastro/saspro/translations/saspro_sw.ts +257 -0
- setiastro/saspro/translations/saspro_uk.qm +0 -0
- setiastro/saspro/translations/saspro_uk.ts +10771 -0
- setiastro/saspro/translations/saspro_zh.qm +0 -0
- setiastro/saspro/translations/saspro_zh.ts +3 -3
- setiastro/saspro/translations/sw_translations.py +3671 -0
- setiastro/saspro/translations/uk_translations.py +3700 -0
- setiastro/saspro/translations/zh_translations.py +16 -0
- setiastro/saspro/versioning.py +36 -5
- setiastro/saspro/view_bundle.py +3 -0
- setiastro/saspro/wavescale_hdr.py +22 -1
- setiastro/saspro/wavescalede.py +23 -1
- setiastro/saspro/whitebalance.py +39 -3
- setiastro/saspro/widgets/minigame/game.js +986 -0
- setiastro/saspro/widgets/minigame/index.html +53 -0
- setiastro/saspro/widgets/minigame/style.css +241 -0
- setiastro/saspro/widgets/resource_monitor.py +237 -0
- setiastro/saspro/widgets/wavelet_utils.py +52 -20
- setiastro/saspro/wimi.py +35 -15
- {setiastrosuitepro-1.6.1.post1.dist-info → setiastrosuitepro-1.6.2.dist-info}/METADATA +15 -4
- {setiastrosuitepro-1.6.1.post1.dist-info → setiastrosuitepro-1.6.2.dist-info}/RECORD +127 -104
- {setiastrosuitepro-1.6.1.post1.dist-info → setiastrosuitepro-1.6.2.dist-info}/WHEEL +0 -0
- {setiastrosuitepro-1.6.1.post1.dist-info → setiastrosuitepro-1.6.2.dist-info}/entry_points.txt +0 -0
- {setiastrosuitepro-1.6.1.post1.dist-info → setiastrosuitepro-1.6.2.dist-info}/licenses/LICENSE +0 -0
- {setiastrosuitepro-1.6.1.post1.dist-info → setiastrosuitepro-1.6.2.dist-info}/licenses/license.txt +0 -0
@@ -83,11 +83,12 @@ from setiastro.saspro.legacy.numba_utils import (
     finalize_drizzle_2d,
     finalize_drizzle_3d,
 )
-from setiastro.saspro.
+from setiastro.saspro.numba_utils import (
     bulk_cosmetic_correction_numba,
     drizzle_deposit_numba_naive,
     drizzle_deposit_color_naive,
-    bulk_cosmetic_correction_bayer
+    bulk_cosmetic_correction_bayer,
+    gradient_descent_to_dim_spot_numba
 )
 from setiastro.saspro.legacy.image_manager import load_image, save_image, get_valid_header
 from setiastro.saspro.star_alignment import StarRegistrationWorker, StarRegistrationThread, IDENTITY_2x3
@@ -704,12 +705,33 @@ def normalize_images(stack: np.ndarray,
         print(f"Normalizing {i}")
         f = stack[i].astype(np.float32, copy=False)
         L = _L(f)
+
+        # Optimization: Don't allocate f0 and L0. Use math properties.
+        # fmin = min(L)
+        # f0 = f - fmin
+        # L0 = L(f0) = L(f - fmin) = L(f) - fmin (since L is linear sum of channels)
+        # median(L0) = median(L - fmin) = median(L) - fmin
+
+        # Calculate stats on original L
+        # Note: nanmin/nanmedian are used to be safe against bad pixels
         fmin = float(np.nanmin(L))
-
-
-
+        lmed_original = float(np.nanmedian(L))
+
+        # The median of the zero-shifted image
+        fmed = lmed_original - fmin
+
         gain = (target_median / max(fmed, eps)) if target_median > 0 else 1.0
-
+
+        # Combine subtraction and multiplication into one operation for 'out'
+        # out = (f - fmin) * gain
+        # This avoids creating the large temporary array 'f0'
+
+        # We can implement this as: out[i] = f * gain - (fmin * gain)
+        # But we must be careful with precision. Typically fine.
+        # Or just: np.subtract(f, fmin, out=out[i]); np.multiply(out[i], gain, out=out[i])
+
+        # Using direct assignment is cleaner and numpy optimizes it well enough
+        out[i] = (f - fmin) * gain
 
     return np.ascontiguousarray(out, dtype=np.float32)
 
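The new comments in normalize_images rely on two facts: the luma helper _L is a linear weighted sum of the channels, and the median commutes with a constant shift, so median(_L(f - fmin)) equals median(_L(f)) - fmin and the gain can be computed without materialising f0 or L0. A small standalone NumPy sketch of that equivalence and of the fused write (illustrative names only, not code from the package):

import numpy as np

def luma(f):
    # Stand-in for the package's _L(): a linear channel sum (BT.601-style weights).
    return 0.2989 * f[..., 0] + 0.5870 * f[..., 1] + 0.1140 * f[..., 2]

rng = np.random.default_rng(0)
f = rng.random((64, 64, 3)).astype(np.float32)

L = luma(f)
fmin = float(np.nanmin(L))

# Old approach: allocate the shifted frame and take the median of its luma.
fmed_old = float(np.nanmedian(luma(f - fmin)))

# New approach: shift the median of the original luma instead.
fmed_new = float(np.nanmedian(L)) - fmin

# Near-exact: the weights sum to 0.9999 rather than exactly 1, hence the loose tolerance.
assert np.isclose(fmed_old, fmed_new, atol=1e-3)

# Fused normalisation write, as in the new out[i] = (f - fmin) * gain
target_median, eps = 0.25, 1e-6
gain = target_median / max(fmed_new, eps)
out = (f - fmin) * gain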
@@ -864,6 +886,11 @@ def _to_Luma(img: np.ndarray) -> np.ndarray:
     if img.ndim == 2:
         return img.astype(np.float32, copy=False)
     # HWC RGB
+    if img.shape[2] == 3:
+        try:
+            return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).astype(np.float32, copy=False)
+        except Exception:
+            pass  # fallback
     r, g, b = img[..., 0].astype(np.float32), img[..., 1].astype(np.float32), img[..., 2].astype(np.float32)
     return 0.2989 * r + 0.5870 * g + 0.1140 * b
 
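For the cv2 fast path added to _to_Luma: OpenCV's RGB→GRAY conversion uses the same BT.601 weights as the manual fallback kept below it, so both branches produce essentially the same luma. A quick standalone check (assumes opencv-python is available; not code from the package):

import numpy as np
import cv2

rgb = np.random.rand(16, 16, 3).astype(np.float32)
luma_cv = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
luma_manual = 0.2989 * rgb[..., 0] + 0.5870 * rgb[..., 1] + 0.1140 * rgb[..., 2]
print(float(np.max(np.abs(luma_cv - luma_manual))))  # ~1e-4: coefficient rounding only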
@@ -898,27 +925,8 @@ def _exclude_bright_regions(gray_small: np.ndarray, exclusion_fraction: float =
 
 
 def _gradient_descent_to_dim_spot(gray_small: np.ndarray, x: int, y: int, patch: int) -> tuple[int, int]:
-
-
-    def patch_median(px, py):
-        x0, x1 = max(0, px - half), min(W, px + half + 1)
-        y0, y1 = max(0, py - half), min(H, py + half + 1)
-        return float(np.median(gray_small[y0:y1, x0:x1]))
-    cx, cy = int(np.clip(x, 0, W-1)), int(np.clip(y, 0, H-1))
-    for _ in range(60):
-        cur = patch_median(cx, cy)
-        best = (cx, cy); best_val = cur
-        for nx in (cx-1, cx, cx+1):
-            for ny in (cy-1, cy, cy+1):
-                if nx == cx and ny == cy: continue
-                if nx < 0 or ny < 0 or nx >= W or ny >= H: continue
-                val = patch_median(nx, ny)
-                if val < best_val:
-                    best_val = val; best = (nx, ny)
-        if best == (cx, cy):
-            break
-        cx, cy = best
-    return cx, cy
+    # Delegate to Numba optimized version
+    return gradient_descent_to_dim_spot_numba(gray_small, int(x), int(y), int(patch))
 
 def _generate_sample_points_small(
     img_small: np.ndarray,
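gradient_descent_to_dim_spot_numba itself lives in setiastro/saspro/numba_utils.py and its body is not shown in this diff. Based on the removed pure-Python helper above, a Numba port would plausibly look like the sketch below (hypothetical name and decorator options; not the package's actual implementation):

import numpy as np
from numba import njit

@njit(cache=True)
def gradient_descent_to_dim_spot_sketch(gray_small, x, y, patch):
    # Walk to the neighbouring pixel whose surrounding patch has the lowest median,
    # for at most 60 steps, exactly as the removed Python loop did.
    H, W = gray_small.shape
    half = patch // 2
    cx = min(max(x, 0), W - 1)
    cy = min(max(y, 0), H - 1)
    for _ in range(60):
        best_x, best_y = cx, cy
        x0, x1 = max(0, cx - half), min(W, cx + half + 1)
        y0, y1 = max(0, cy - half), min(H, cy + half + 1)
        best_val = np.median(gray_small[y0:y1, x0:x1])
        for nx in range(cx - 1, cx + 2):
            for ny in range(cy - 1, cy + 2):
                if (nx == cx and ny == cy) or nx < 0 or ny < 0 or nx >= W or ny >= H:
                    continue
                x0, x1 = max(0, nx - half), min(W, nx + half + 1)
                y0, y1 = max(0, ny - half), min(H, ny + half + 1)
                val = np.median(gray_small[y0:y1, x0:x1])
                if val < best_val:
                    best_val = val
                    best_x, best_y = nx, ny
        if best_x == cx and best_y == cy:
            break
        cx, cy = best_x, best_y
    return cx, cy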
@@ -6340,7 +6348,7 @@ class StackingSuiteDialog(QDialog):
         dark_frames_layout.addLayout(btn_layout)
 
         self.clear_dark_selection_btn = QPushButton(self.tr("Clear Selection"))
-        self.clear_dark_selection_btn.clicked.connect(lambda: self.
+        self.clear_dark_selection_btn.clicked.connect(lambda: self.clear_tree_selection_dark(self.dark_tree, self.dark_files))
         dark_frames_layout.addWidget(self.clear_dark_selection_btn)
 
         darks_layout.addLayout(dark_frames_layout, 2)  # Dark Frames Tree takes more space
@@ -6413,6 +6421,9 @@ class StackingSuiteDialog(QDialog):
         )
         main_layout.addWidget(self.clear_master_dark_selection_btn)
 
+        self.dark_tree.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
+        self.dark_tree.customContextMenuRequested.connect(self.dark_tree_context_menu)
+
         return tab
 
     def _tree_for_type(self, t: str):
@@ -6644,6 +6655,23 @@ class StackingSuiteDialog(QDialog):
 
         return tab
 
+    def dark_tree_context_menu(self, pos):
+        item = self.dark_tree.itemAt(pos)
+        if not item:
+            return
+
+        # ✅ same selection behavior
+        if not item.isSelected():
+            if not (QApplication.keyboardModifiers() & (Qt.KeyboardModifier.ControlModifier | Qt.KeyboardModifier.ShiftModifier)):
+                self.dark_tree.clearSelection()
+            item.setSelected(True)
+
+        menu = QMenu(self.dark_tree)
+        set_session_action = menu.addAction(self.tr("Set Session Tag..."))
+
+        action = menu.exec(self.dark_tree.viewport().mapToGlobal(pos))
+        if action == set_session_action:
+            self.prompt_set_session(item, "DARK")
 
 
     def flat_tree_context_menu(self, pos):
@@ -6938,27 +6966,27 @@ class StackingSuiteDialog(QDialog):
         name = (self.settings.value("stacking/session_keyword", "Default", type=str) or "").strip()
         return name or "Default"
 
-    def
-
+    def _is_leaf(it):
+        # leaf == no children AND looks like a file row (has a filename)
+        if not it or it.childCount() != 0:
+            return False
+        name = (it.text(0) or "").strip()
+        # file rows in your UI are actual filenames
+        return bool(name) and "." in name
+
 
     def _iter_leaf_descendants(self, it: QTreeWidgetItem):
-        """Yield all leaf grandchildren under a filter row or exposure row."""
         if not it:
             return
-
-
-
-
-
-
-
-
-
-        elif it.parent() and it.parent().parent() is None and it.childCount() > 0:
-            for k in range(it.childCount()):
-                leaf = it.child(k)
-                if self._is_leaf_item(leaf):
-                    yield leaf
+        stack = [it]
+        while stack:
+            cur = stack.pop()
+            if self._is_leaf_item(cur):
+                yield cur
+                continue
+            for i in range(cur.childCount()):
+                stack.append(cur.child(i))
+
 
     def _collect_target_leaves(self, tree: QTreeWidget, clicked_item: QTreeWidgetItem | None = None) -> list[QTreeWidgetItem]:
         """
@@ -7074,43 +7102,59 @@ class StackingSuiteDialog(QDialog):
         return kw
 
     def prompt_set_session(self, item, frame_type):
-        text, ok = QInputDialog.getText(
+        text, ok = QInputDialog.getText(
+            self,
+            self.tr("Set Session Tag"),
+            self.tr("Enter session name:")
+        )
         if not (ok and (text or "").strip()):
            return
         session_name = text.strip()
 
-
-
-
+        ft = (frame_type or "").upper()
+        is_flat = (ft == "FLAT")
+        is_light = (ft == "LIGHT")
+        is_dark = (ft == "DARK")
+
+        if is_flat:
+            tree = self.flat_tree
+            target_dict = self.flat_files
+        elif is_light:
+            tree = self.light_tree
+            target_dict = self.light_files
+        elif is_dark:
+            tree = self.dark_tree
+            target_dict = self.dark_files
+        else:
+            return
 
         if not hasattr(self, "session_tags") or self.session_tags is None:
             self.session_tags = {}
 
         # --- helper: identify a "leaf" row in your tree (file row) ---
         def _is_leaf(it):
-
+            # leaf == no children AND looks like a file row (has a filename)
+            if not it or it.childCount() != 0:
+                return False
+            name = (it.text(0) or "").strip()
+            # file rows in your UI are actual filenames
+            return bool(name) and "." in name
+
 
         def _iter_leaf_descendants(parent_item):
-            """Yield all leaf file rows under
+            """Yield all leaf file rows under any parent row (any depth)."""
             if not parent_item:
                 return
-
-
-
-
-
-
-
-
-            # exposure row
-            elif parent_item.parent() and parent_item.parent().parent() is None and parent_item.childCount() > 0:
-                for k in range(parent_item.childCount()):
-                    leaf = parent_item.child(k)
-                    if _is_leaf(leaf):
-                        yield leaf
+            stack = [parent_item]
+            while stack:
+                cur = stack.pop()
+                if _is_leaf(cur):
+                    yield cur
+                    continue
+                for j in range(cur.childCount()):
+                    stack.append(cur.child(j))
 
         def _session_from_leaf(leaf):
-            # Prefer cached value (we’ll set it during ingest/retag)
             try:
                 s = leaf.data(0, Qt.ItemDataRole.UserRole + 1)
                 if isinstance(s, str) and s.strip():
@@ -7134,42 +7178,65 @@ class StackingSuiteDialog(QDialog):
             except Exception:
                 pass
 
-        def _rekey_session_for_path(
+        def _rekey_session_for_path(target_dict: dict, fpath: str, new_session: str, *, group_key_hint: str | None = None):
             """
-            Move fpath from (group_key, old_session)
-
+            Move fpath from whatever (group_key, old_session) bucket(s) it's currently in
+            to (same_group_key, new_session).
+
+            This is robust even if the tree-derived group_key string doesn't exactly match
+            the dict key[0] that was used when the file was added.
             """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            new_session = (new_session or "Default").strip() or "Default"
+            f_norm = os.path.normcase(os.path.abspath(fpath))
+
+            # 1) Find all tuple-keys containing this file, regardless of session
+            found_group_keys: list[str] = []
+            keys_to_delete = []
+
+            for key, lst in list(target_dict.items()):
+                if not (isinstance(key, tuple) and len(key) >= 2):
+                    continue
+
+                # check if file exists in this bucket
+                keep = []
+                removed_here = False
+                for p in (lst or []):
+                    if os.path.normcase(os.path.abspath(p)) == f_norm:
+                        removed_here = True
+                    else:
+                        keep.append(p)
+
+                if removed_here:
+                    gk = str(key[0])
+                    if gk not in found_group_keys:
+                        found_group_keys.append(gk)
+
+                # write back / delete empty
+                if removed_here:
+                    if keep:
+                        target_dict[key] = keep
+                    else:
+                        keys_to_delete.append(key)
+
+            for k in keys_to_delete:
+                target_dict.pop(k, None)
+
+            # If not found anywhere (rare), fall back to hint so at least it gets added
+            if not found_group_keys and group_key_hint:
+                found_group_keys = [group_key_hint]
+
+            # 2) Add to new-session bucket(s)
+            for gk in found_group_keys:
+                new_key = (gk, new_session)
+                cur = list(target_dict.get(new_key, []) or [])
+                cur_norms = {os.path.normcase(os.path.abspath(p)) for p in cur}
+                if f_norm not in cur_norms:
+                    cur.append(fpath)
+                target_dict[new_key] = cur
 
-            # add to new key (avoid dupes)
-            target_dict.setdefault(new_ck, [])
-            if fpath not in target_dict[new_ck]:
-                target_dict[new_ck].append(fpath)
 
         # --- Build the set of leaf rows to retag ---
         selected = list(tree.selectedItems() or [])
-
-        # Include the right-clicked item even if it wasn’t selected
         if item and item not in selected:
             selected.append(item)
 
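_rekey_session_for_path is a local helper inside prompt_set_session; its effect on the (group_key, session) -> [paths] dict can be illustrated with a plain-dict sketch (standalone function and made-up paths, not the package helper itself):

import os

def rekey_session(target_dict, fpath, new_session):
    # Bucket-scan move: drop fpath from every (group_key, session) bucket that holds it,
    # then re-add it under (same group_key, new_session).
    f_norm = os.path.normcase(os.path.abspath(fpath))
    groups = []
    for key in list(target_dict.keys()):
        gk, _sess = key
        keep = [p for p in target_dict[key]
                if os.path.normcase(os.path.abspath(p)) != f_norm]
        if len(keep) != len(target_dict[key]):  # the file was in this bucket
            if gk not in groups:
                groups.append(gk)
            if keep:
                target_dict[key] = keep
            else:
                del target_dict[key]
    for gk in groups:
        target_dict.setdefault((gk, new_session), []).append(fpath)

files = {("300.0s (4096x4096)", "Default"): ["/data/dark_001.fit", "/data/dark_002.fit"]}
rekey_session(files, "/data/dark_001.fit", "Night2")
# files == {("300.0s (4096x4096)", "Default"): ["/data/dark_002.fit"],
#           ("300.0s (4096x4096)", "Night2"):  ["/data/dark_001.fit"]}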
@@ -7203,35 +7270,64 @@ class StackingSuiteDialog(QDialog):
             # fallback once for legacy rows missing UserRole
             if not fpath:
                 filename = leaf.text(0).lstrip("⚠️ ").strip()
-
-
-
-
-
+                # NOTE: this only works for tuple-keyed dicts; that's fine for flats/lights
+                try:
+                    fpath = next(
+                        (p for (gk, sess), lst in target_dict.items() for p in (lst or [])
+                         if os.path.basename(p) == filename),
+                        None
+                    )
+                except Exception:
+                    fpath = None
                 if fpath:
                     leaf.setData(0, Qt.ItemDataRole.UserRole, fpath)
 
             if not fpath:
                 continue
 
-
-
-
-            if
+            parent = leaf.parent()
+            grand = parent.parent() if parent else None
+
+            if parent is None:
                 continue
 
-
-
+            if is_dark:
+                # DARK tree is 2-level: group -> file
+                group_key = parent.text(0)
 
-
-
+            elif is_flat:
+                # FLAT tree is (typically) 3-level: filter -> group -> file
+                # Your create_master_flat groups by EXACT string: "{filter} - {group}"
+                # where group is like "Unknown (4096x4096)" (what the middle node shows).
+                if grand is None:
+                    # If your flat tree is actually 2-level in some configs, fall back safely
+                    group_key = parent.text(0)
+                else:
+                    group_key = f"{grand.text(0)} - {parent.text(0)}"
 
+            elif is_light:
+                # LIGHT is 3-level: filter -> exposure -> file
+                if grand is None:
+                    continue
+                group_key = f"{grand.text(0)} - {parent.text(0)}"
+            else:
+                continue
+
+
+            # We still compute group_key for a fallback hint, but removal is now bucket-scan based.
+            if _session_from_leaf(leaf) != session_name:
+                _rekey_session_for_path(target_dict, fpath, session_name, group_key_hint=group_key)
+
+
+            # Tag always updates UI + cache
             self.session_tags[fpath] = session_name
             _set_leaf_session_text(leaf, session_name)
             changed += 1
 
-
-
+        self._normalize_sessioned_files_map(target_dict)
+
+        # Only LIGHT needs reassignment of best master files
+        if is_light:
             try:
                 self.assign_best_master_files(fill_only=True)
             except Exception:
@@ -7240,7 +7336,6 @@ class StackingSuiteDialog(QDialog):
         tree.viewport().update()
         self.update_status(self.tr(f"🟢 Assigned session '{session_name}' to {changed} file(s)."))
 
-
     def _quad_coverage_add(self, cov: np.ndarray, quad: np.ndarray):
         """
         Rasterize a convex quad (4x2 float array of (x,y) in aligned coords) into 'cov' by +1 filling.
@@ -8467,6 +8562,28 @@ class StackingSuiteDialog(QDialog):
         self.settings.setValue("stacking/master_darks", dark_paths)
         self.settings.setValue("stacking/master_flats", flat_paths)
 
+    def _purge_removed_paths(self, removed_paths: list[str]):
+        if not removed_paths:
+            return
+        # purge session override cache
+        if hasattr(self, "session_tags") and isinstance(self.session_tags, dict):
+            for p in removed_paths:
+                self.session_tags.pop(p, None)
+
+        # If you have any "ingested" caches, clear those too:
+        if hasattr(self, "_ingested_paths") and isinstance(self._ingested_paths, set):
+            for p in removed_paths:
+                self._ingested_paths.discard(p)
+
+        if hasattr(self, "manual_flat_files") and isinstance(self.manual_flat_files, list):
+            dead = {os.path.normcase(os.path.abspath(p)) for p in removed_paths if isinstance(p, str)}
+            self.manual_flat_files = [p for p in self.manual_flat_files if os.path.normcase(os.path.abspath(p)) not in dead]
+
+        if hasattr(self, "manual_light_files") and isinstance(self.manual_light_files, list):
+            dead = {os.path.normcase(os.path.abspath(p)) for p in removed_paths if isinstance(p, str)}
+            self.manual_light_files = [p for p in self.manual_light_files if os.path.normcase(os.path.abspath(p)) not in dead]
+
+
     def clear_tree_selection(self, tree, file_dict):
         """Clears selected items from a simple (non-tuple-keyed) tree like Master Darks or Darks tab."""
         selected_items = tree.selectedItems()
@@ -8491,96 +8608,249 @@ class StackingSuiteDialog(QDialog):
                 del file_dict[key]
             parent.removeChild(item)
 
-
-    def clear_tree_selection_light(self, tree):
-        """Clears the selection in the light tree and updates self.light_files accordingly."""
+    def clear_tree_selection_dark(self, tree, file_dict):
         selected_items = tree.selectedItems()
         if not selected_items:
             return
 
+        removed_paths = []
+
         for item in selected_items:
             parent = item.parent()
+
             if parent is None:
-                #
-
-
-
-
-                for
-
+                # top-level exposure group
+                gk = item.text(0)
+
+                # remove ALL sessions for this exposure group
+                keys_to_remove = []
+                for k in list(file_dict.keys()):
+                    if isinstance(k, tuple) and len(k) >= 2:
+                        if str(k[0]) == gk:
+                            keys_to_remove.append(k)
+                    else:
+                        if str(k) == gk:
+                            keys_to_remove.append(k)
+
+                for k in keys_to_remove:
+                    for p in file_dict.get(k, []) or []:
+                        removed_paths.append(p)
+                    del file_dict[k]
+
                 tree.takeTopLevelItem(tree.indexOfTopLevelItem(item))
-
-
-
-
-
-
-
-
-
-
-
+                continue
+
+            # leaf file node under exposure group
+            gk = parent.text(0)
+            fpath = item.data(0, Qt.ItemDataRole.UserRole)
+            filename = item.text(0).lstrip("⚠️ ").strip()
+
+            keys_to_check = []
+            for k in list(file_dict.keys()):
+                if isinstance(k, tuple) and len(k) >= 2:
+                    if str(k[0]) == gk:
+                        keys_to_check.append(k)
                 else:
-
-
-
-
-
+                    if str(k) == gk:
+                        keys_to_check.append(k)
+
+            for k in keys_to_check:
+                lst = file_dict.get(k, []) or []
+                new_lst = []
+                for p in lst:
+                    if fpath and p == fpath:
+                        removed_paths.append(p)
+                        continue
+                    if (not fpath) and os.path.basename(p) == filename:
+                        removed_paths.append(p)
+                        continue
+                    new_lst.append(p)
+                if new_lst:
+                    file_dict[k] = new_lst
+                else:
+                    del file_dict[k]
+
+            parent.removeChild(item)
+
+        self._purge_removed_paths(removed_paths)
+
+        # normalize if sessioned (or if legacy)
+        self._normalize_sessioned_files_map(file_dict)
+
+    def clear_tree_selection_light(self, tree):
+        selected_items = tree.selectedItems()
+        if not selected_items:
+            return
 
-
-            if isinstance(key, tuple) and key[0] == group_key]
+        removed_paths = []
 
-
-
-                f for f in self.light_files[key] if os.path.basename(f) != filename
-            ]
-            if not self.light_files[key]:
-                del self.light_files[key]
-            parent.removeChild(item)
+        def _norm(p: str) -> str:
+            return os.path.normcase(os.path.abspath(p))
 
-
+        def _remove_path_everywhere(fpath: str):
+            if not fpath:
+                return
+            f_norm = _norm(fpath)
+            keys_to_delete = []
+            for k, lst in list(self.light_files.items()):
+                if not (isinstance(k, tuple) and len(k) >= 2):
+                    continue
+                keep = []
+                removed = False
+                for p in (lst or []):
+                    if _norm(p) == f_norm:
+                        removed = True
+                    else:
+                        keep.append(p)
+                if removed:
+                    removed_paths.append(fpath)
+                if keep:
+                    self.light_files[k] = keep
+                else:
+                    keys_to_delete.append(k)
+            for k in keys_to_delete:
+                self.light_files.pop(k, None)
+
+        def _collect_leaf_paths_under(node):
+            out = []
+            stack = [node]
+            while stack:
+                cur = stack.pop()
+                if cur.childCount() == 0:
+                    fp = cur.data(0, Qt.ItemDataRole.UserRole)
+                    if isinstance(fp, str) and fp.strip():
+                        out.append(fp)
+                    continue
+                for j in range(cur.childCount()):
+                    stack.append(cur.child(j))
+            return out
+
+        for item in selected_items:
+            parent = item.parent()
+
+            if parent is None:
+                for fp in _collect_leaf_paths_under(item):
+                    _remove_path_everywhere(fp)
+                idx = tree.indexOfTopLevelItem(item)
+                if idx >= 0:
+                    tree.takeTopLevelItem(idx)
+                continue
+
+            for fp in _collect_leaf_paths_under(item):
+                _remove_path_everywhere(fp)
+
+            parent.removeChild(item)
+
+        self._purge_removed_paths(removed_paths)
+        self._normalize_sessioned_files_map(self.light_files)
+
+        try:
+            self.rebuild_light_tree()
+        except Exception:
+            try:
+                self._refresh_light_tree_summaries()
+            except Exception:
+                pass
+
 
     def clear_tree_selection_flat(self, tree, file_dict):
-        """
+        """
+        Clears selection in FLATS tree and removes from (group_key, session)->[paths].
+
+        Works for BOTH layouts:
+          - 2-level: group -> file leaves (current rebuild_flat_tree)
+          - 3-level: filter -> exposure -> file leaves (older layout)
+        """
         selected_items = tree.selectedItems()
         if not selected_items:
             return
 
+        removed_paths = []
+
+        def _norm(p: str) -> str:
+            return os.path.normcase(os.path.abspath(p))
+
+        def _remove_path_everywhere(fpath: str):
+            """Remove fpath from ALL buckets in file_dict (robust against group_key mismatches)."""
+            if not fpath:
+                return
+            f_norm = _norm(fpath)
+
+            keys_to_delete = []
+            for k, lst in list(file_dict.items()):
+                if not (isinstance(k, tuple) and len(k) >= 2):
+                    continue
+                keep = []
+                removed = False
+                for p in (lst or []):
+                    if _norm(p) == f_norm:
+                        removed = True
+                    else:
+                        keep.append(p)
+
+                if removed:
+                    removed_paths.append(fpath)
+                if keep:
+                    file_dict[k] = keep
+                else:
+                    keys_to_delete.append(k)
+
+            for k in keys_to_delete:
+                file_dict.pop(k, None)
+
+        def _collect_leaf_paths_under(node):
+            """Return all descendant leaf file paths under a node (supports group nodes)."""
+            out = []
+            stack = [node]
+            while stack:
+                cur = stack.pop()
+                if cur.childCount() == 0:
+                    fp = cur.data(0, Qt.ItemDataRole.UserRole)
+                    if isinstance(fp, str) and fp.strip():
+                        out.append(fp)
+                    continue
+                for j in range(cur.childCount()):
+                    stack.append(cur.child(j))
+            return out
+
+        # We’ll delete dict entries by file paths (most robust), then rebuild UI.
         for item in selected_items:
             parent = item.parent()
 
-            if parent:
-                #
-
-
-
-                group_key = f"{filter_name} - {exposure_text}"
-            else:
-                # Exposure level
-                filter_name = parent.text(0)
-                exposure_text = item.text(0)
-                group_key = f"{filter_name} - {exposure_text}"
+            if parent is None:
+                # Selected a top-level node (either "group" in 2-level, or "filter" in 3-level).
+                # Remove every leaf path under it from the dict.
+                for fp in _collect_leaf_paths_under(item):
+                    _remove_path_everywhere(fp)
 
-
+                # Remove UI node
+                idx = tree.indexOfTopLevelItem(item)
+                if idx >= 0:
+                    tree.takeTopLevelItem(idx)
+                continue
 
-
-
-
+            # Selected a leaf or mid-level node; remove all descendant leaf paths
+            for fp in _collect_leaf_paths_under(item):
+                _remove_path_everywhere(fp)
+
+            # Remove UI node
+            parent.removeChild(item)
+
+        # purge caches + normalize
+        self._purge_removed_paths(removed_paths)
+        self._normalize_sessioned_files_map(file_dict)
+
+        # Rebuild from dict (this ensures UI reflects the dict truth)
+        try:
+            self.rebuild_flat_tree()
+        except Exception:
+            # If you really don't want rebuild here, at least:
+            try:
+                self._refresh_flat_tree_summaries()
+            except Exception:
+                pass
 
-            for key in keys_to_check:
-                file_dict[key] = [f for f in file_dict[key] if os.path.basename(f) != filename]
-                if not file_dict[key]:
-                    del file_dict[key]
 
-                parent.removeChild(item)
-            else:
-                # Top-level (filter group) selected
-                filter_name = item.text(0)
-                keys_to_remove = [key for key in list(file_dict.keys())
-                                  if isinstance(key, tuple) and key[0].startswith(f"{filter_name} - ")]
-                for key in keys_to_remove:
-                    del file_dict[key]
-                tree.takeTopLevelItem(tree.indexOfTopLevelItem(item))
 
     def _sync_group_userrole(self, top_item: QTreeWidgetItem):
         paths = []
@@ -8657,19 +8927,19 @@ class StackingSuiteDialog(QDialog):
        # Keep parent's stored list in sync (your helper)
        self._sync_group_userrole(parent)
 
-        #
-
+        # --- DO NOT persist exclusions for manual removals in reg tab ---
+        # If you want a separate "Exclude" feature later, keep _reg_excluded_files for that.
+        # For now, removing should be reversible via "Add Light Files".
 
-        #
-        for p in removed_paths:
-            if p not in self.deleted_calibrated_files:
-                self.deleted_calibrated_files.append(p)
-
-        # Also prune manual list so it doesn't re-inject removed files
+        # Also prune manual list so it doesn't re-inject removed files *in this session*
        if hasattr(self, "manual_light_files") and self.manual_light_files:
-
+            dead = {os.path.normcase(os.path.abspath(p)) for p in removed_paths if isinstance(p, str)}
+            self.manual_light_files = [
+                p for p in self.manual_light_files
+                if os.path.normcase(os.path.abspath(p)) not in dead
+            ]
 
-        #
+        # refresh UI
        self.populate_calibrated_lights()
        self._refresh_reg_tree_summaries()
 
|
|
|
9186
9456
|
self.add_directory(self.light_tree, "Select Light Directory", "LIGHT")
|
|
9187
9457
|
self.assign_best_master_files()
|
|
9188
9458
|
|
|
9459
|
+
def _normalize_sessioned_files_map(self, files_map: dict):
|
|
9460
|
+
"""
|
|
9461
|
+
Canonicalize dict that should be keyed like: (group_key, session) -> [paths]
|
|
9462
|
+
|
|
9463
|
+
- Drops empty lists
|
|
9464
|
+
- Dedupe paths
|
|
9465
|
+
- Coerces keys to (str, str)
|
|
9466
|
+
"""
|
|
9467
|
+
if not isinstance(files_map, dict):
|
|
9468
|
+
return
|
|
9469
|
+
|
|
9470
|
+
new_map = {}
|
|
9471
|
+
for k, lst in list(files_map.items()):
|
|
9472
|
+
if not lst:
|
|
9473
|
+
continue
|
|
9474
|
+
|
|
9475
|
+
# Coerce key to (group_key, session)
|
|
9476
|
+
if isinstance(k, tuple) and len(k) >= 2:
|
|
9477
|
+
gk = str(k[0])
|
|
9478
|
+
sess = str(k[1])
|
|
9479
|
+
else:
|
|
9480
|
+
# legacy/no-session dict; keep but force Default
|
|
9481
|
+
gk = str(k)
|
|
9482
|
+
sess = "Default"
|
|
9483
|
+
|
|
9484
|
+
# Deduplicate paths while preserving order
|
|
9485
|
+
seen = set()
|
|
9486
|
+
out = []
|
|
9487
|
+
for p in lst:
|
|
9488
|
+
if not p:
|
|
9489
|
+
continue
|
|
9490
|
+
p = str(p)
|
|
9491
|
+
if p in seen:
|
|
9492
|
+
continue
|
|
9493
|
+
seen.add(p)
|
|
9494
|
+
out.append(p)
|
|
9495
|
+
|
|
9496
|
+
if not out:
|
|
9497
|
+
continue
|
|
9498
|
+
|
|
9499
|
+
ck = (gk, sess)
|
|
9500
|
+
if ck not in new_map:
|
|
9501
|
+
new_map[ck] = out
|
|
9502
|
+
else:
|
|
9503
|
+
# merge
|
|
9504
|
+
for p in out:
|
|
9505
|
+
if p not in new_map[ck]:
|
|
9506
|
+
new_map[ck].append(p)
|
|
9507
|
+
|
|
9508
|
+
files_map.clear()
|
|
9509
|
+
files_map.update(new_map)
|
|
9510
|
+
|
|
9189
9511
|
|
|
9190
9512
|
def prompt_session_before_adding(self, frame_type, directory_mode=False):
|
|
9191
9513
|
# Respect auto-detect; do nothing here if auto is ON
|
|
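_normalize_sessioned_files_map mutates the dict in place; its expected effect on a mixed legacy/sessioned map looks roughly like this (made-up keys and paths, shown only to illustrate the rules in the docstring):

before = {
    "180.0s (3008x3008)": ["a.fit", "a.fit", ""],          # legacy key, duplicate, empty entry
    ("180.0s (3008x3008)", "Night1"): ["b.fit"],
    ("300.0s (3008x3008)", "Night1"): [],                    # empty bucket
}
# After normalization: the legacy key is coerced to ("180.0s (3008x3008)", "Default"),
# duplicates and empty strings are dropped, and the empty bucket disappears.
after = {
    ("180.0s (3008x3008)", "Default"): ["a.fit"],
    ("180.0s (3008x3008)", "Night1"): ["b.fit"],
}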
@@ -10051,32 +10373,58 @@ class StackingSuiteDialog(QDialog):
             QMessageBox.warning(self, "Error", "Output directory is not set.")
             return
 
-        # Keep both paths available; we'll override algo selection per group.
         ui_algo = getattr(self, "calib_rejection_algorithm", "Windsorized Sigma Clipping")
         if ui_algo == "Weighted Windsorized Sigma Clipping":
             ui_algo = "Windsorized Sigma Clipping"
 
         exposure_tolerance = self.exposure_tolerance_spinbox.value()
-        dark_files_by_group: dict[tuple[float, str], list[str]] = {}
 
         # -------------------------------------------------------------------------
-        # Group darks by (exposure +/- tolerance, image size string)
+        # Group darks by (exposure +/- tolerance, image size string, session)
+        # self.dark_files can be either:
+        #   legacy:  exposure_key -> [paths]
+        #   session: (exposure_key, session) -> [paths]
         # -------------------------------------------------------------------------
-
-
-
-
-
+        dark_files_by_group: dict[tuple[float, str, str], list[str]] = {}  # (exp, size, session)->list
+
+        for key, file_list in (self.dark_files or {}).items():
+            if isinstance(key, tuple) and len(key) >= 2:
+                exposure_key = str(key[0])
+                session = str(key[1]) if str(key[1]).strip() else "Default"
+            else:
+                exposure_key = str(key)
+                session = "Default"
+
+            try:
+                exposure_time_str, image_size = exposure_key.split(" (", 1)
+                image_size = image_size.rstrip(")")
+            except ValueError:
+                # If some malformed key got in, skip safely
+                continue
+
+            if "Unknown" in exposure_time_str:
+                exposure_time = 0.0
+            else:
+                try:
+                    exposure_time = float(exposure_time_str.replace("s", "").strip())
+                except Exception:
+                    exposure_time = 0.0
 
             matched_group = None
-            for (existing_exposure, existing_size) in dark_files_by_group.keys():
-                if
-
+            for (existing_exposure, existing_size, existing_session) in list(dark_files_by_group.keys()):
+                if (
+                    existing_session == session
+                    and existing_size == image_size
+                    and abs(existing_exposure - exposure_time) <= exposure_tolerance
+                ):
+                    matched_group = (existing_exposure, existing_size, existing_session)
                     break
+
             if matched_group is None:
-                matched_group = (exposure_time, image_size)
+                matched_group = (exposure_time, image_size, session)
                 dark_files_by_group[matched_group] = []
-
+
+            dark_files_by_group[matched_group].extend(file_list or [])
 
         master_dir = os.path.join(self.stacking_directory, "Master_Calibration_Files")
         os.makedirs(master_dir, exist_ok=True)
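The grouping above keys darks by (exposure ± tolerance, image size, session), parsing exposure strings of the form "300.0s (4096x4096)". A standalone sketch of just the key-parsing step it performs (hypothetical helper name, same parsing rules as the hunk):

def parse_exposure_key(exposure_key: str) -> tuple[float, str]:
    # "300.0s (4096x4096)" -> (300.0, "4096x4096"); unknown exposures collapse to 0.0
    exposure_time_str, image_size = exposure_key.split(" (", 1)
    image_size = image_size.rstrip(")")
    if "Unknown" in exposure_time_str:
        return 0.0, image_size
    return float(exposure_time_str.replace("s", "").strip()), image_size

assert parse_exposure_key("300.0s (4096x4096)") == (300.0, "4096x4096")
assert parse_exposure_key("Unknown (4096x4096)") == (0.0, "4096x4096")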
@@ -10085,7 +10433,7 @@ class StackingSuiteDialog(QDialog):
         # Informative status about discovery
         # -------------------------------------------------------------------------
         try:
-            n_groups = sum(1 for
+            n_groups = sum(1 for _, v in dark_files_by_group.items() if len(v) >= 2)
             total_files = sum(len(v) for v in dark_files_by_group.values())
             self.update_status(self.tr(
                 f"🔎 Discovered {len(dark_files_by_group)} grouped exposures "
|
|
|
10096
10444
|
QApplication.processEvents()
|
|
10097
10445
|
|
|
10098
10446
|
# -------------------------------------------------------------------------
|
|
10099
|
-
# Pre-count tiles for progress bar (
|
|
10447
|
+
# Pre-count tiles for progress bar (per-group safe chunk sizes)
|
|
10100
10448
|
# -------------------------------------------------------------------------
|
|
10101
10449
|
total_tiles = 0
|
|
10102
|
-
group_shapes: dict[tuple[float, str], tuple[int, int, int, int, int]] = {}
|
|
10450
|
+
group_shapes: dict[tuple[float, str, str], tuple[int, int, int, int, int]] = {} # (exp,size,session)->(H,W,C,ch,cw)
|
|
10103
10451
|
pref_chunk_h = self.chunk_height
|
|
10104
10452
|
pref_chunk_w = self.chunk_width
|
|
10105
|
-
DTYPE = np.float32
|
|
10453
|
+
DTYPE = np.float32
|
|
10106
10454
|
|
|
10107
|
-
for (exposure_time, image_size), file_list in dark_files_by_group.items():
|
|
10455
|
+
for (exposure_time, image_size, session), file_list in dark_files_by_group.items():
|
|
10108
10456
|
if len(file_list) < 2:
|
|
10109
10457
|
continue
|
|
10110
10458
|
|
|
@@ -10117,16 +10465,12 @@ class StackingSuiteDialog(QDialog):
|
|
|
10117
10465
|
C = max(1, C)
|
|
10118
10466
|
N = len(file_list)
|
|
10119
10467
|
|
|
10120
|
-
# Use the same safe-chunk logic as normal integration
|
|
10121
10468
|
try:
|
|
10122
|
-
chunk_h, chunk_w = compute_safe_chunk(
|
|
10123
|
-
H, W, N, C, DTYPE, pref_chunk_h, pref_chunk_w
|
|
10124
|
-
)
|
|
10469
|
+
chunk_h, chunk_w = compute_safe_chunk(H, W, N, C, DTYPE, pref_chunk_h, pref_chunk_w)
|
|
10125
10470
|
except MemoryError:
|
|
10126
|
-
# Fall back to user chunk config if memory check failed
|
|
10127
10471
|
chunk_h, chunk_w = pref_chunk_h, pref_chunk_w
|
|
10128
10472
|
|
|
10129
|
-
group_shapes[(exposure_time, image_size)] = (H, W, C, chunk_h, chunk_w)
|
|
10473
|
+
group_shapes[(exposure_time, image_size, session)] = (H, W, C, chunk_h, chunk_w)
|
|
10130
10474
|
total_tiles += _count_tiles(H, W, chunk_h, chunk_w)
|
|
10131
10475
|
|
|
10132
10476
|
if total_tiles == 0:
|
|
@@ -10139,7 +10483,7 @@ class StackingSuiteDialog(QDialog):
|
|
|
10139
10483
|
QApplication.processEvents()
|
|
10140
10484
|
|
|
10141
10485
|
# -------------------------------------------------------------------------
|
|
10142
|
-
# Local CPU reducers
|
|
10486
|
+
# Local CPU reducers (unchanged)
|
|
10143
10487
|
# -------------------------------------------------------------------------
|
|
10144
10488
|
def _select_reducer(kind: str, N: int):
|
|
10145
10489
|
if kind == "dark":
|
|
@@ -10149,8 +10493,7 @@ class StackingSuiteDialog(QDialog):
                 return ("Simple Median (No Rejection)", {}, "median")
             else:
                 return ("Trimmed Mean", {"trim_fraction": 0.05}, "trimmed")
-
-            raise ValueError("wrong kind")
+            raise ValueError("wrong kind")
 
         def _cpu_tile_median(ts4: np.ndarray) -> np.ndarray:
             return np.median(ts4, axis=0).astype(np.float32, copy=False)
@@ -10178,17 +10521,16 @@ class StackingSuiteDialog(QDialog):
             return out.astype(np.float32, copy=False)
 
         pd = _Progress(self, "Create Master Darks", total_tiles)
-
         from concurrent.futures import ThreadPoolExecutor
 
         try:
             # ---------------------------------------------------------------------
             # Per-group stacking loop
             # ---------------------------------------------------------------------
-            for (exposure_time, image_size), file_list in dark_files_by_group.items():
+            for (exposure_time, image_size, session), file_list in dark_files_by_group.items():
                 if len(file_list) < 2:
                     self.update_status(self.tr(
-                        f"⚠️ Skipping {exposure_time}s ({image_size}) - Not enough frames to stack."
+                        f"⚠️ Skipping {exposure_time}s ({image_size}) [{session}] - Not enough frames to stack."
                     ))
                     QApplication.processEvents()
                     continue
@@ -10198,21 +10540,17 @@ class StackingSuiteDialog(QDialog):
                     break
 
                 self.update_status(self.tr(
-                    f"🟢 Processing {len(file_list)} darks for {exposure_time}s ({image_size})
+                    f"🟢 Processing {len(file_list)} darks for {exposure_time}s ({image_size}) in session '{session}'…"
                 ))
                 QApplication.processEvents()
 
                 # --- reference shape and per-group chunk size ---
-                if (exposure_time, image_size) in group_shapes:
-                    height, width, channels, chunk_height, chunk_width = group_shapes[
-                        (exposure_time, image_size)
-                    ]
+                if (exposure_time, image_size, session) in group_shapes:
+                    height, width, channels, chunk_height, chunk_width = group_shapes[(exposure_time, image_size, session)]
                 else:
                     ref_data, _, _, _ = load_image(file_list[0])
                     if ref_data is None:
-                        self.update_status(self.tr(
-                            f"❌ Failed to load reference {os.path.basename(file_list[0])}"
-                        ))
+                        self.update_status(self.tr(f"❌ Failed to load reference {os.path.basename(file_list[0])}"))
                         continue
                     height, width = ref_data.shape[:2]
                     channels = 1 if ref_data.ndim == 2 else 3
|
|
|
10220
10558
|
N_tmp = len(file_list)
|
|
10221
10559
|
try:
|
|
10222
10560
|
chunk_height, chunk_width = compute_safe_chunk(
|
|
10223
|
-
height, width, N_tmp, channels, DTYPE,
|
|
10224
|
-
pref_chunk_h, pref_chunk_w
|
|
10561
|
+
height, width, N_tmp, channels, DTYPE, pref_chunk_h, pref_chunk_w
|
|
10225
10562
|
)
|
|
10226
10563
|
except MemoryError:
|
|
10227
10564
|
chunk_height, chunk_width = pref_chunk_h, pref_chunk_w
|
|
10228
10565
|
|
|
10229
|
-
channels = max(1, channels)
|
|
10230
10566
|
N = len(file_list)
|
|
10231
10567
|
|
|
10232
|
-
# --- choose reducer adaptively ---
|
|
10233
10568
|
algo_name, params, cpu_label = _select_reducer("dark", N)
|
|
10234
10569
|
use_gpu = bool(self._hw_accel_enabled()) and _torch_ok() and _gpu_algo_supported(algo_name)
|
|
10235
10570
|
algo_brief = ("GPU" if use_gpu else "CPU") + " " + algo_name
|
|
10236
|
-
self.update_status(self.tr(
|
|
10237
|
-
f"⚙️ {algo_brief} selected for {N} frames (channels={channels})"
|
|
10238
|
-
))
|
|
10571
|
+
self.update_status(self.tr(f"⚙️ {algo_brief} selected for {N} frames (channels={channels})"))
|
|
10239
10572
|
QApplication.processEvents()
|
|
10240
10573
|
|
|
10241
|
-
# --- open
|
|
10574
|
+
# --- open sources ---
|
|
10242
10575
|
sources = []
|
|
10243
10576
|
try:
|
|
10244
10577
|
for p in file_list:
|
|
10245
|
-
sources.append(_MMImage(p))
|
|
10578
|
+
sources.append(_MMImage(p))
|
|
10246
10579
|
except Exception as e:
|
|
10247
|
-
# Clean up any partially opened sources
|
|
10248
10580
|
for s in sources:
|
|
10249
10581
|
try:
|
|
10250
10582
|
s.close()
|
|
@@ -10254,93 +10586,64 @@ class StackingSuiteDialog(QDialog):
                     QApplication.processEvents()
                     continue
 
-                #
-                memmap_path = os.path.join(
-
-                )
+                # Include session to prevent collisions
+                memmap_path = os.path.join(master_dir, f"temp_dark_{session}_{exposure_time}_{image_size}.dat")
+
                 self.update_status(self.tr(
                     f"🗂️ Creating temp memmap: {os.path.basename(memmap_path)} "
                     f"(shape={height}×{width}×{channels}, dtype=float32)"
                 ))
                 QApplication.processEvents()
-                final_stacked = np.memmap(
-                    memmap_path,
-                    dtype=np.float32,
-                    mode="w+",
-                    shape=(height, width, channels),
-                )
 
-
+                final_stacked = np.memmap(memmap_path, dtype=np.float32, mode="w+", shape=(height, width, channels))
+
                 tiles = _tile_grid(height, width, chunk_height, chunk_width)
                 total_tiles_group = len(tiles)
                 self.update_status(self.tr(
-                    f"📦 {total_tiles_group} tiles to process for this group "
-                    f"(chunk {chunk_height}×{chunk_width})."
+                    f"📦 {total_tiles_group} tiles to process for this group (chunk {chunk_height}×{chunk_width})."
                 ))
                 QApplication.processEvents()
 
-
-                buf0 = np.empty(
-                    (N, chunk_height, chunk_width, channels),
-                    dtype=np.float32,
-                    order="C",
-                )
+                buf0 = np.empty((N, chunk_height, chunk_width, channels), dtype=np.float32, order="C")
                 buf1 = np.empty_like(buf0)
 
-                # Helper: read one tile into the given buffer from all memmapped sources
                 def _read_tile_into(buf, y0, y1, x0, x1):
                     th = y1 - y0
                     tw = x1 - x0
                     ts = buf[:N, :th, :tw, :channels]
                     for i, src in enumerate(sources):
-                        sub = src.read_tile(y0, y1, x0, x1)
+                        sub = src.read_tile(y0, y1, x0, x1)
                         if sub.ndim == 2:
-                            if channels == 3
-                                sub = sub[:, :, None].repeat(3, axis=2)
-                            else:
-                                sub = sub[:, :, None]
+                            sub = sub[:, :, None] if channels == 1 else sub[:, :, None].repeat(3, axis=2)
                         ts[i, :, :, :] = sub
-                    return th, tw
+                    return th, tw
 
                 tp = ThreadPoolExecutor(max_workers=1)
 
-                # Prime first read
                 (y0, y1, x0, x1) = tiles[0]
                 fut = tp.submit(_read_tile_into, buf0, y0, y1, x0, x1)
                 use0 = True
-
-                # Uniform weights for darks (no quality weighting)
                 weights_np = np.ones((N,), dtype=np.float32)
 
-                # --- per-tile loop ---
                 cancelled_group = False
                 for t_idx, (y0, y1, x0, x1) in enumerate(tiles, start=1):
                     if pd.cancelled:
                         cancelled_group = True
-                        self.update_status(self.tr(
-                            "⛔ Master Dark creation cancelled during tile processing."
-                        ))
+                        self.update_status(self.tr("⛔ Master Dark creation cancelled during tile processing."))
                         break
 
                     th, tw = fut.result()
                     ts_np = (buf0 if use0 else buf1)[:N, :th, :tw, :channels]
 
-                    # Prefetch next tile
                     if t_idx < total_tiles_group:
                         ny0, ny1, nx0, nx1 = tiles[t_idx]
-                        fut = tp.submit(
-                            _read_tile_into,
-                            (buf1 if use0 else buf0),
-                            ny0, ny1, nx0, nx1,
-                        )
+                        fut = tp.submit(_read_tile_into, (buf1 if use0 else buf0), ny0, ny1, nx0, nx1)
 
                     pd.set_label(
-                        f"{int(exposure_time)}s ({image_size}) — "
-                        f"tile {t_idx}/{total_tiles_group} "
-                        f"y:{y0}-{y1} x:{x0}-{x1}"
+                        f"{int(exposure_time)}s ({image_size}) [{session}] — "
+                        f"tile {t_idx}/{total_tiles_group} y:{y0}-{y1} x:{x0}-{x1}"
                     )
 
-                    # ---- reduction (GPU or CPU) ----
                     if use_gpu:
                         tile_result, _ = _torch_reduce_tile(
                             ts_np,
@@ -10350,59 +10653,39 @@ class StackingSuiteDialog(QDialog):
                         iterations=int(params.get("iterations", getattr(self, "iterations", 1))),
                         sigma_low=float(getattr(self, "sigma_low", 2.5)),
                         sigma_high=float(getattr(self, "sigma_high", 2.5)),
-                        trim_fraction=float(
-                            params.get("trim_fraction", getattr(self, "trim_fraction", 0.05))
-                        ),
+                        trim_fraction=float(params.get("trim_fraction", getattr(self, "trim_fraction", 0.05))),
                         esd_threshold=float(getattr(self, "esd_threshold", 3.0)),
-                        biweight_constant=float(
-                            getattr(self, "biweight_constant", 6.0)
-                        ),
+                        biweight_constant=float(getattr(self, "biweight_constant", 6.0)),
                         modz_threshold=float(getattr(self, "modz_threshold", 3.5)),
-                        comet_hclip_k=float(
-                            self.settings.value("stacking/comet_hclip_k", 1.30, type=float)
-                        ),
-                        comet_hclip_p=float(
-                            self.settings.value("stacking/comet_hclip_p", 25.0, type=float)
-                        ),
+                        comet_hclip_k=float(self.settings.value("stacking/comet_hclip_k", 1.30, type=float)),
+                        comet_hclip_p=float(self.settings.value("stacking/comet_hclip_p", 25.0, type=float)),
                     )
                 else:
                     if cpu_label == "median":
                         tile_result = _cpu_tile_median(ts_np)
                     elif cpu_label == "trimmed":
-                        tile_result = _cpu_tile_trimmed_mean(
-                            ts_np,
-                            float(params.get("trim_fraction", 0.05)),
-                        )
-                    else: # 'kappa1'
-                        tile_result = _cpu_tile_kappa_sigma_1iter(
-                            ts_np,
-                            float(params.get("kappa", 3.0)),
-                        )
+                        tile_result = _cpu_tile_trimmed_mean(ts_np, float(params.get("trim_fraction", 0.05)))
+                    else:
+                        tile_result = _cpu_tile_kappa_sigma_1iter(ts_np, float(params.get("kappa", 3.0)))
 
-                # Ensure tile_result has correct shape (th, tw, channels)
                 if tile_result.ndim == 2:
                     tile_result = tile_result[:, :, None]
                 expected_shape = (th, tw, channels)
                 if tile_result.shape != expected_shape:
-                    if tile_result.shape[2] ==
-                        tile_result = np.zeros(expected_shape, dtype=np.float32)
-                    elif tile_result.shape[:2] == (th, tw):
+                    if tile_result.shape[:2] == (th, tw):
                         if tile_result.shape[2] > channels:
                             tile_result = tile_result[:, :, :channels]
                         else:
-                            tile_result = np.repeat(
-                                tile_result, channels, axis=2
-                            )[:, :, :channels]
+                            tile_result = np.repeat(tile_result, channels, axis=2)[:, :, :channels]
+                    else:
+                        tile_result = np.zeros(expected_shape, dtype=np.float32)
 
-                # Commit tile result into final memmap
                 final_stacked[y0:y1, x0:x1, :] = tile_result
-
                 pd.step()
                 use0 = not use0
 
             tp.shutdown(wait=True)
 
-            # Close memmapped sources for this group
             for s in sources:
                 try:
                     s.close()
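On the CPU fallback each tile stack of shape `(N, th, tw, channels)` is collapsed along the frame axis by `_cpu_tile_median`, `_cpu_tile_trimmed_mean` or `_cpu_tile_kappa_sigma_1iter`, which are defined elsewhere in the module. The sketch below shows plain-NumPy equivalents of the trimmed mean and the single-pass kappa-sigma clip, purely as an illustration of the reductions involved, not the project's actual (Numba/torch) implementations.

```python
# Illustrative NumPy reductions over the frame axis (axis=0).
import numpy as np

def tile_trimmed_mean(ts: np.ndarray, trim_fraction: float = 0.05) -> np.ndarray:
    """Drop the lowest/highest trim_fraction of frames per pixel, then average."""
    n = ts.shape[0]
    k = int(n * trim_fraction)
    srt = np.sort(ts, axis=0)                     # sort frames per pixel
    core = srt[k:n - k] if n - 2 * k > 0 else srt
    return core.mean(axis=0, dtype=np.float32)

def tile_kappa_sigma_1iter(ts: np.ndarray, kappa: float = 3.0) -> np.ndarray:
    """One pass of kappa-sigma clipping around the per-pixel median."""
    med = np.median(ts, axis=0)
    std = ts.std(axis=0)
    mask = np.abs(ts - med) <= kappa * std        # keep frames within kappa*sigma
    num = (ts * mask).sum(axis=0)
    cnt = np.maximum(mask.sum(axis=0), 1)
    return (num / cnt).astype(np.float32)

# Example: 12 frames of a 64x64 single-channel tile
ts_np = np.random.default_rng(0).normal(100.0, 5.0, (12, 64, 64, 1)).astype(np.float32)
print(tile_trimmed_mean(ts_np).shape, tile_kappa_sigma_1iter(ts_np).shape)
```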
@@ -10410,9 +10693,7 @@ class StackingSuiteDialog(QDialog):
                     pass
 
             if cancelled_group:
-                self.update_status(self.tr(
-                    "⛔ Master Dark creation cancelled; cleaning up temporary files."
-                ))
+                self.update_status(self.tr("⛔ Master Dark creation cancelled; cleaning up temporary files."))
                 try:
                     del final_stacked
                 except Exception:
@@ -10423,7 +10704,6 @@ class StackingSuiteDialog(QDialog):
                     pass
                 break
 
-            # Convert memmap to regular array and free the file
             master_dark_data = np.asarray(final_stacked, dtype=np.float32)
             del final_stacked
             gc.collect()
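The hunks above stage the running stack in a disk-backed memmap (`final_stacked`), then materialize it in RAM and drop the memmap reference so the temporary file can be cleaned up. A small self-contained sketch of that staging pattern follows; the temp file name is hypothetical, and the explicit `copy=True` is used here only to make the in-RAM copy unambiguous.

```python
# Sketch: accumulate tile results in a disk-backed memmap, then materialize in RAM.
import gc
import os
import tempfile
import numpy as np

H, W, C = 256, 256, 1
tmp_path = os.path.join(tempfile.gettempdir(), "final_stacked_demo.dat")  # hypothetical name

final_stacked = np.memmap(tmp_path, dtype=np.float32, mode="w+", shape=(H, W, C))
for y0 in range(0, H, 128):
    for x0 in range(0, W, 128):
        tile = np.full((128, 128, C), 1.0, dtype=np.float32)  # stand-in for a reduced tile
        final_stacked[y0:y0 + 128, x0:x0 + 128, :] = tile

master = np.array(final_stacked, copy=True)  # force an in-RAM copy
del final_stacked                            # drop the memmap reference...
gc.collect()                                 # ...so the file handle is released
os.remove(tmp_path)                          # and the temporary file can be deleted
print(master.shape, float(master.mean()))
```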
@@ -10432,38 +10712,29 @@ class StackingSuiteDialog(QDialog):
             except Exception:
                 pass
 
-
+            # Include session in output name
+            master_dark_stem = f"MasterDark_{session}_{int(exposure_time)}s_{image_size}"
             master_dark_path = self._build_out(master_dir, master_dark_stem, "fit")
 
             master_header = fits.Header()
             master_header["IMAGETYP"] = "DARK"
-            master_header["EXPTIME"] = (
-                exposure_time,
-                "User-specified or from grouping",
-            )
+            master_header["EXPTIME"] = (exposure_time, "User-specified or from grouping")
+            master_header["SESSION"] = (session, "User session tag")  # optional but useful
             master_header["NAXIS"] = 3 if channels == 3 else 2
             master_header["NAXIS1"] = master_dark_data.shape[1]
             master_header["NAXIS2"] = master_dark_data.shape[0]
             if channels == 3:
                 master_header["NAXIS3"] = 3
 
-            save_image(
-                master_dark_data,
-                master_dark_path,
-                "fit",
-                "32-bit floating point",
-                master_header,
-                is_mono=(channels == 1),
-            )
-            self.add_master_dark_to_tree(
-                f"{exposure_time}s ({image_size})", master_dark_path
-            )
+            save_image(master_dark_data, master_dark_path, "fit", "32-bit floating point", master_header, is_mono=(channels == 1))
+
+            self.add_master_dark_to_tree(f"{exposure_time}s ({image_size}) [{session}]", master_dark_path)
             self.update_status(self.tr(f"✅ Master Dark saved: {master_dark_path}"))
             QApplication.processEvents()
+
             self.assign_best_master_files()
             self.save_master_paths_to_settings()
 
-            # wrap-up
             self.assign_best_master_dark()
             self.update_override_dark_combo()
             self.assign_best_master_files()
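The save step now embeds the session tag in both the FITS header and the output file name, and writes through the project's `save_image()` helper. As a rough illustration only, a roughly equivalent write using astropy directly might look like the sketch below; this is an assumption about what the helper ultimately produces, not its actual code path.

```python
# Rough astropy-based equivalent of the master-dark save step (illustration only).
import numpy as np
from astropy.io import fits

def write_master_dark(data: np.ndarray, path: str, exposure_time: float, session: str) -> None:
    hdr = fits.Header()
    hdr["IMAGETYP"] = "DARK"
    hdr["EXPTIME"] = (exposure_time, "User-specified or from grouping")
    hdr["SESSION"] = (session, "User session tag")
    # FITS stores planes first, so move a (H, W, 3) color stack to (3, H, W)
    if data.ndim == 3 and data.shape[2] == 3:
        data = np.moveaxis(data, -1, 0)
    fits.PrimaryHDU(data=data.astype(np.float32), header=hdr).writeto(path, overwrite=True)

# Hypothetical usage:
# write_master_dark(master_dark_data, "MasterDark_Default_300s_6248x4176.fit", 300.0, "Default")
```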
@@ -10475,6 +10746,7 @@ class StackingSuiteDialog(QDialog):
             import logging
             logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
         pd.close()
+
 
     def add_master_dark_to_tree(self, exposure_label: str, master_dark_path: str):
         """
@@ -10691,7 +10963,15 @@ class StackingSuiteDialog(QDialog):
         # -------------------------------------------------------------------------
         # Group flats exactly as before
         # -------------------------------------------------------------------------
-        for
+        for key, file_list in (self.flat_files or {}).items():
+            # Support both legacy and new key formats
+            if isinstance(key, tuple) and len(key) >= 2:
+                filter_exposure = str(key[0])
+                session = str(key[1] or "Default").strip() or "Default"
+            else:
+                filter_exposure = str(key)
+                session = "Default"
+
             try:
                 filter_name, exposure_size = filter_exposure.split(" - ")
                 exposure_time_str, image_size = exposure_size.split(" (")
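Flat groups can now be keyed either by the legacy `"Filter - 300.0s (6248x4176)"` string or by a `(label, session)` tuple, with the session falling back to `"Default"`. The hypothetical standalone helper below mirrors that normalization logic; the name `split_flat_key` is illustrative and does not exist in the module.

```python
# Hypothetical helper mirroring the key-normalization added above.
from typing import Tuple, Union

FlatKey = Union[str, Tuple[str, str]]

def split_flat_key(key: FlatKey) -> Tuple[str, str]:
    """Return (filter_exposure_label, session), defaulting the session to 'Default'."""
    if isinstance(key, tuple) and len(key) >= 2:
        label = str(key[0])
        session = str(key[1] or "Default").strip() or "Default"
    else:
        label = str(key)
        session = "Default"
    return label, session

print(split_flat_key("Ha - 300.0s (6248x4176)"))          # ('Ha - 300.0s (6248x4176)', 'Default')
print(split_flat_key(("Ha - 300.0s (6248x4176)", "N1")))  # ('Ha - 300.0s (6248x4176)', 'N1')
```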
@@ -10704,21 +10984,35 @@ class StackingSuiteDialog(QDialog):
             exposure_time = float(match.group(1)) if match else -10.0
 
             matched_group = None
-            for
-                existing_exposure, existing_size, existing_filter, existing_session =
+            for k in flat_files_by_group:
+                existing_exposure, existing_size, existing_filter, existing_session = k
                 if (
                     abs(existing_exposure - exposure_time) <= exposure_tolerance
                     and existing_size == image_size
                     and existing_filter == filter_name
                     and existing_session == session
                 ):
-                    matched_group =
+                    matched_group = k
                     break
 
             if matched_group is None:
                 matched_group = (exposure_time, image_size, filter_name, session)
                 flat_files_by_group[matched_group] = []
-
+
+            flat_files_by_group[matched_group].extend(file_list or [])
+
+        # Dedupe paths within each group (prevents accidental double-counts)
+        for k, lst in list(flat_files_by_group.items()):
+            seen = set()
+            out = []
+            for p in (lst or []):
+                pn = os.path.normcase(os.path.abspath(p))
+                if pn in seen:
+                    continue
+                seen.add(pn)
+                out.append(p)
+            flat_files_by_group[k] = out
+
 
         # Discovery summary
         try:
@@ -17809,12 +18103,7 @@ class StackingSuiteDialog(QDialog):
         # --- reusable C-order tile buffers (avoid copies before GPU) ---
         def _mk_buf():
             buf = np.empty((N, chunk_h, chunk_w, channels), dtype=np.float32, order='C')
-
-            # We'll pin tensors inside _torch_reduce_tile; nothing to do here.
-            try:
-                import torch  # noqa: F401
-            except Exception:
-                pass
+
             return buf
 
         buf0 = _mk_buf()
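The per-group dedupe added in the `@@ -10704` hunk is an order-preserving filter keyed on `os.path.normcase(os.path.abspath(path))`, so the same file referenced with different casing or relative prefixes is counted only once while the original ordering of each group is kept. A standalone sketch of the same logic:

```python
# Order-preserving, case- and prefix-insensitive path dedupe (illustrative).
import os
from typing import Iterable, List

def dedupe_paths(paths: Iterable[str]) -> List[str]:
    seen = set()
    out: List[str] = []
    for p in paths or []:
        key = os.path.normcase(os.path.abspath(p))  # normalize before comparing
        if key in seen:
            continue
        seen.add(key)
        out.append(p)                               # keep the first spelling encountered
    return out

print(dedupe_paths(["./flats/a.fit", "flats/a.fit", "./flats/b.fit"]))
# -> ['./flats/a.fit', './flats/b.fit']
```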