setiastrosuitepro 1.6.10-py3-none-any.whl → 1.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. setiastro/images/colorwheel.svg +97 -0
  2. setiastro/images/narrowbandnormalization.png +0 -0
  3. setiastro/images/planetarystacker.png +0 -0
  4. setiastro/saspro/__main__.py +1 -1
  5. setiastro/saspro/_generated/build_info.py +2 -2
  6. setiastro/saspro/aberration_ai.py +49 -11
  7. setiastro/saspro/aberration_ai_preset.py +29 -3
  8. setiastro/saspro/backgroundneutral.py +73 -33
  9. setiastro/saspro/blink_comparator_pro.py +116 -71
  10. setiastro/saspro/convo.py +9 -6
  11. setiastro/saspro/curve_editor_pro.py +72 -22
  12. setiastro/saspro/curves_preset.py +249 -47
  13. setiastro/saspro/doc_manager.py +178 -11
  14. setiastro/saspro/gui/main_window.py +218 -66
  15. setiastro/saspro/gui/mixins/dock_mixin.py +245 -24
  16. setiastro/saspro/gui/mixins/file_mixin.py +35 -16
  17. setiastro/saspro/gui/mixins/menu_mixin.py +31 -1
  18. setiastro/saspro/gui/mixins/toolbar_mixin.py +132 -10
  19. setiastro/saspro/histogram.py +179 -7
  20. setiastro/saspro/imageops/narrowband_normalization.py +816 -0
  21. setiastro/saspro/imageops/serloader.py +769 -0
  22. setiastro/saspro/imageops/starbasedwhitebalance.py +23 -52
  23. setiastro/saspro/imageops/stretch.py +66 -15
  24. setiastro/saspro/legacy/numba_utils.py +25 -48
  25. setiastro/saspro/live_stacking.py +24 -4
  26. setiastro/saspro/multiscale_decomp.py +30 -17
  27. setiastro/saspro/narrowband_normalization.py +1618 -0
  28. setiastro/saspro/numba_utils.py +0 -55
  29. setiastro/saspro/ops/script_editor.py +5 -0
  30. setiastro/saspro/ops/scripts.py +119 -0
  31. setiastro/saspro/remove_green.py +1 -1
  32. setiastro/saspro/resources.py +4 -0
  33. setiastro/saspro/ser_stack_config.py +68 -0
  34. setiastro/saspro/ser_stacker.py +2245 -0
  35. setiastro/saspro/ser_stacker_dialog.py +1481 -0
  36. setiastro/saspro/ser_tracking.py +206 -0
  37. setiastro/saspro/serviewer.py +1242 -0
  38. setiastro/saspro/sfcc.py +602 -214
  39. setiastro/saspro/shortcuts.py +35 -16
  40. setiastro/saspro/stacking_suite.py +332 -87
  41. setiastro/saspro/star_alignment.py +243 -122
  42. setiastro/saspro/stat_stretch.py +220 -31
  43. setiastro/saspro/subwindow.py +2 -4
  44. setiastro/saspro/whitebalance.py +24 -0
  45. setiastro/saspro/widgets/resource_monitor.py +122 -74
  46. {setiastrosuitepro-1.6.10.dist-info → setiastrosuitepro-1.7.0.dist-info}/METADATA +2 -2
  47. {setiastrosuitepro-1.6.10.dist-info → setiastrosuitepro-1.7.0.dist-info}/RECORD +51 -40
  48. {setiastrosuitepro-1.6.10.dist-info → setiastrosuitepro-1.7.0.dist-info}/WHEEL +0 -0
  49. {setiastrosuitepro-1.6.10.dist-info → setiastrosuitepro-1.7.0.dist-info}/entry_points.txt +0 -0
  50. {setiastrosuitepro-1.6.10.dist-info → setiastrosuitepro-1.7.0.dist-info}/licenses/LICENSE +0 -0
  51. {setiastrosuitepro-1.6.10.dist-info → setiastrosuitepro-1.7.0.dist-info}/licenses/license.txt +0 -0
setiastro/saspro/imageops/starbasedwhitebalance.py
@@ -30,10 +30,10 @@ cached_star_sources: Optional[np.ndarray] = None
  cached_flux_radii: Optional[np.ndarray] = None


- def _tone_preserve_bg_neutralize(rgb: np.ndarray) -> np.ndarray:
+ def _tone_preserve_bg_neutralize(rgb: np.ndarray, *, return_pivot: bool = False):
  """
  Neutralize background using the darkest grid patch in a tone-preserving way.
- Operates in-place on a copy; returns the neutralized image (float32 [0,1]).
+ Returns float32 RGB in [0,1]. If return_pivot, also returns the patch median (pivot).
  """
  h, w = rgb.shape[:2]
  patch_size = 10
@@ -56,12 +56,15 @@ def _tone_preserve_bg_neutralize(rgb: np.ndarray) -> np.ndarray:
  out = rgb.copy()
  if best is not None:
  avg = float(np.mean(best))
- # “tone-preserving” shift+scale channel-wise toward avg
  for c in range(3):
  diff = float(best[c] - avg)
  denom = (1.0 - diff) if abs(1.0 - diff) > 1e-8 else 1e-8
  out[:, :, c] = np.clip((out[:, :, c] - diff) / denom, 0.0, 1.0)
- return out
+
+ if return_pivot:
+ pivot = best.astype(np.float32) if best is not None else np.median(rgb, axis=(0, 1)).astype(np.float32)
+ return out.astype(np.float32, copy=False), pivot
+ return out.astype(np.float32, copy=False)


  def apply_star_based_white_balance(
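The per-channel shift-and-scale above pulls each channel of the darkest patch toward the patch average while holding 1.0 fixed. A standalone NumPy sketch of that mapping (illustrative only, not the package's own code):

import numpy as np

# Minimal sketch of the tone-preserving shift+scale used above (assumed behavior):
# each channel is remapped so the dark-patch value lands near the patch average
# while pure white (1.0) stays exactly at 1.0.
def tone_preserve_shift(channel: np.ndarray, patch_value: float, patch_avg: float) -> np.ndarray:
    diff = patch_value - patch_avg                              # channel offset from neutral
    denom = (1.0 - diff) if abs(1.0 - diff) > 1e-8 else 1e-8    # keeps 1.0 a fixed point
    return np.clip((channel - diff) / denom, 0.0, 1.0)

vals = np.array([0.0, 0.12, 1.0], dtype=np.float32)
print(tone_preserve_shift(vals, patch_value=0.12, patch_avg=0.10))
# -> approximately [0.0, 0.102, 1.0]: the patch value lands near the average, white stays white.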
@@ -73,38 +76,16 @@ def apply_star_based_white_balance(
  ) -> Tuple[np.ndarray, int, np.ndarray, np.ndarray, np.ndarray] | Tuple[np.ndarray, int, np.ndarray]:
  """
  Star-based white balance with background neutralization and an RGB overlay of detected stars.
-
- Parameters
- ----------
- image : np.ndarray
- RGB image (any dtype). Assumed RGB ordering.
- threshold : float
- SExtractor detection threshold (in background sigma).
- autostretch : bool
- If True, overlay is built from an autostretched view for visibility.
- reuse_cached_sources : bool
- If True, reuses star positions measured on a previous call (same scene).
- return_star_colors : bool
- If True, also returns (raw_star_pixels, after_star_pixels).
-
- Returns
- -------
- balanced_rgb : float32 RGB in [0,1]
- star_count : int
- overlay_rgb : float32 RGB in [0,1] with star ellipses drawn
- (optional) raw_star_pixels : (N,3) float array, colors sampled from ORIGINAL image
- (optional) after_star_pixels : (N,3) float array, colors sampled after WB
+ (Correct version: does NOT crush data below the pivot.)
  """
  if image.ndim != 3 or image.shape[2] != 3:
  raise ValueError("apply_star_based_white_balance: input must be an RGB image (H,W,3).")

- # 0) normalize
  img_rgb = _to_float01(image)

- # 1) first background neutralization (tone-preserving)
- bg_neutral = _tone_preserve_bg_neutralize(img_rgb)
+ # 1) background neutralization + pivot (per-channel medians of darkest patch)
+ bg_neutral, pivot = _tone_preserve_bg_neutralize(img_rgb, return_pivot=True)

- # 2) detect / reuse star positions
  if sep is None:
  raise ImportError(
  "apply_star_based_white_balance requires the 'sep' package. "
@@ -135,7 +116,6 @@ def apply_star_based_white_balance(
  cached_star_sources = sources
  cached_flux_radii = r

- # filter: small-ish, star-like
  mask = (r > 0) & (r <= 10)
  sources = sources[mask]
  r = r[mask]
@@ -143,15 +123,14 @@ def apply_star_based_white_balance(
  raise ValueError("All detected sources were rejected as non-stellar (too large).")

  h, w = gray.shape
- # raw colors from ORIGINAL image - optimized vectorized extraction
  xs = sources["x"].astype(np.int32)
  ys = sources["y"].astype(np.int32)
  valid = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)
+
  raw_star_pixels = img_rgb[ys[valid], xs[valid], :]

- # 3) build overlay (autostretched if requested) and draw ellipses
+ # overlay
  disp = stretch_color_image(bg_neutral.copy(), 0.25) if autostretch else bg_neutral.copy()
-
  if cv2 is not None:
  overlay_bgr = cv2.cvtColor((disp * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)
  for i in range(len(sources)):
@@ -160,41 +139,35 @@ def apply_star_based_white_balance(
  theta_deg = float(sources["theta"][i] * 180.0 / np.pi)
  center = (int(round(cx)), int(round(cy)))
  axes = (max(1, int(round(3 * a))), max(1, int(round(3 * b))))
- # red ellipse in BGR
  cv2.ellipse(overlay_bgr, center, axes, angle=theta_deg, startAngle=0, endAngle=360,
  color=(0, 0, 255), thickness=1)
  overlay_rgb = cv2.cvtColor(overlay_bgr, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
  else:
- # fallback: no ellipses, just the display image
  overlay_rgb = disp.astype(np.float32, copy=False)

- # 4) compute WB scale using star colors sampled on bg_neutral image
- # Optimized: vectorized extraction instead of Python loop (10-50x faster)
- xs = sources["x"].astype(np.int32)
- ys = sources["y"].astype(np.int32)
- valid_mask = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)
-
+ # star pixels for WB
+ valid_mask = valid
  if not np.any(valid_mask):
  raise ValueError("No stellar samples available for white balance.")
-
+
  star_pixels = bg_neutral[ys[valid_mask], xs[valid_mask], :].astype(np.float32)
  avg_color = np.mean(star_pixels, axis=0)
  max_val = float(np.max(avg_color))
- # protect against divide-by-zero
  avg_color = np.where(avg_color <= 1e-8, 1e-8, avg_color)
- scaling = max_val / avg_color
+ scaling = (max_val / avg_color).astype(np.float32) # (3,)

- balanced = (bg_neutral * scaling.reshape((1, 1, 3))).clip(0.0, 1.0)
+ # Correct median-locked WB (NO hard floor)
+ m = pivot.reshape((1, 1, 3)).astype(np.float32)
+ g = scaling.reshape((1, 1, 3)).astype(np.float32)

- # 5) second background neutralization pass on balanced image
- balanced = _tone_preserve_bg_neutralize(balanced)
+ balanced = (bg_neutral.astype(np.float32) - m) * g + m
+ balanced = np.clip(balanced, 0.0, 1.0).astype(np.float32, copy=False)

- # 6) collect after-WB star samples - optimized vectorized extraction
  after_star_pixels = balanced[ys[valid_mask], xs[valid_mask], :]

  if return_star_colors:
  return (
- balanced.astype(np.float32, copy=False),
+ balanced,
  int(len(star_pixels)),
  overlay_rgb.astype(np.float32, copy=False),
  np.asarray(raw_star_pixels, dtype=np.float32),
@@ -202,9 +175,7 @@ def apply_star_based_white_balance(
  )

  return (
- balanced.astype(np.float32, copy=False),
+ balanced,
  int(len(star_pixels)),
  overlay_rgb.astype(np.float32, copy=False),
  )
-
-
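The new WB step scales each channel about the background pivot instead of about zero, so a pixel sitting at the pivot is unchanged and nothing below it gets pushed toward black. A small standalone sketch of that pivot-anchored scaling (the function name and test values are illustrative, not the package API):

import numpy as np

# Pivot-anchored (median-locked) white balance: values equal to the per-channel
# pivot stay put; everything else scales around it rather than around zero.
def pivot_locked_wb(rgb: np.ndarray, pivot: np.ndarray, scaling: np.ndarray) -> np.ndarray:
    m = pivot.reshape(1, 1, 3).astype(np.float32)
    g = scaling.reshape(1, 1, 3).astype(np.float32)
    return np.clip((rgb.astype(np.float32) - m) * g + m, 0.0, 1.0)

pivot = np.array([0.05, 0.06, 0.07], dtype=np.float32)   # stand-in background levels
gains = np.array([1.3, 1.0, 1.1], dtype=np.float32)      # stand-in per-channel gains
bg_pixel = np.tile(pivot, (1, 1, 1))                      # a pure-background pixel
print(pivot_locked_wb(bg_pixel, pivot, gains))            # unchanged: the background stays put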
setiastro/saspro/imageops/stretch.py
@@ -504,6 +504,7 @@ def stretch_color_image(image: np.ndarray,
  hdr_knee: float = 0.75,
  luma_only: bool = False,
  luma_mode: str = "rec709",
+ luma_blend: float = 1.0,
  high_range: bool = False,
  highrange_pedestal: float = 0.001,
  highrange_soft_ceil_pct: float = 99.0,
@@ -536,17 +537,64 @@

  sig = float(blackpoint_sigma)

- # ----- LUMA ONLY PATH -----
+ # ----- LUMA ONLY PATH (now with optional blending) -----
  if luma_only:
+ b = float(np.clip(luma_blend, 0.0, 1.0))
+
+ # --- A) Normal linked RGB stretch (same settings, but NOT luma-only) ---
+ # Force linked=True here (matches "normal linked stretch" expectation)
+ # We compute this first so b=0 is fast-ish if you later optimize.
+ if no_black_clip:
+ bp = float(img.min())
+ med_img = float(np.median(img))
+ else:
+ bp, med_img = _compute_blackpoint_sigma(img, sig)
+
+ denom = max(1.0 - bp, 1e-12)
+ med_rescaled = (med_img - bp) / denom
+
+ linked_out = numba_color_linked_from_img(img, bp, denom, float(med_rescaled), float(target_median))
+
+ if apply_curves:
+ linked_out = apply_curves_adjustment(linked_out, float(target_median), float(curves_boost))
+
+ if hdr_compress and hdr_amount > 0.0:
+ linked_out = hdr_compress_color_luminance(
+ linked_out,
+ amount=float(hdr_amount),
+ knee=float(hdr_knee),
+ luma_mode="rec709",
+ )
+
+ if high_range:
+ linked_out = _high_range_rescale_and_softclip(
+ linked_out,
+ target_bg=float(target_median),
+ pedestal=float(highrange_pedestal),
+ soft_ceil_pct=float(highrange_soft_ceil_pct),
+ hard_ceil_pct=float(highrange_hard_ceil_pct),
+ floor_sigma=float(blackpoint_sigma),
+ softclip_threshold=float(highrange_softclip_threshold),
+ softclip_rolloff=float(highrange_softclip_rolloff),
+ )
+
+ if normalize:
+ mx = float(linked_out.max())
+ if mx > 0:
+ linked_out = linked_out / mx
+
+ linked_out = np.clip(linked_out, 0.0, 1.0).astype(np.float32, copy=False)
+
+ # Short-circuit if blend is 0 (pure linked)
+ if b <= 0.0:
+ return linked_out
+
+ # --- B) Your existing luma-only recombine stretch ---
  resolved_method, w, _profile_name = resolve_luma_profile_weights(luma_mode)

- # For snr mode, compute_luminance may require noise_sigma; your module supports that,
- # but for stretch we can keep it simple: treat snr as normal rec709 unless you want
- # to plumb sigma estimation here too.
- # If you DO want snr weights, we can reuse _estimate_noise_sigma_per_channel.
  ns = None
  if resolved_method == "snr":
- ns = _estimate_noise_sigma_per_channel(img) # expects ~[0..1] float
+ ns = _estimate_noise_sigma_per_channel(img)
  L = compute_luminance(img, method=resolved_method, weights=w, noise_sigma=ns)

  Ls = stretch_mono_image(
@@ -556,23 +604,22 @@
  apply_curves=apply_curves,
  curves_boost=curves_boost,
  blackpoint_sigma=sig,
+ no_black_clip=no_black_clip,
  hdr_compress=False,
  hdr_amount=0.0,
  hdr_knee=hdr_knee,
- high_range=False, # do high_range after recombine
+ high_range=False,
  )

  if hdr_compress and hdr_amount > 0.0:
  Ls = hdr_compress_highlights(Ls, float(hdr_amount), knee=float(hdr_knee))

- # Choose actual RGB weights for recombine
  if w is not None and np.asarray(w).size == 3:
  rw = np.asarray(w, dtype=np.float32)
  s = float(rw.sum())
  if s > 0:
  rw = rw / s
  else:
- # If resolver returns None for standard modes, fall back
  if resolved_method == "rec601":
  rw = np.array([0.2990, 0.5870, 0.1140], dtype=np.float32)
  elif resolved_method == "rec2020":
@@ -580,17 +627,17 @@
  else:
  rw = np.array([0.2126, 0.7152, 0.0722], dtype=np.float32)

- out = recombine_luminance_linear_scale(
+ luma_out = recombine_luminance_linear_scale(
  img,
  Ls,
  weights=rw,
  blend=1.0,
- highlight_soft_knee=0.0, # separate from HDR; keep 0 unless you want extra protection
+ highlight_soft_knee=0.0,
  )

  if high_range:
- out = _high_range_rescale_and_softclip(
- out,
+ luma_out = _high_range_rescale_and_softclip(
+ luma_out,
  target_bg=float(target_median),
  pedestal=float(highrange_pedestal),
  soft_ceil_pct=float(highrange_soft_ceil_pct),
@@ -601,10 +648,14 @@
  )

  if normalize:
- mx = float(out.max())
+ mx = float(luma_out.max())
  if mx > 0:
- out = out / mx
+ luma_out = luma_out / mx
+
+ luma_out = np.clip(luma_out, 0.0, 1.0).astype(np.float32, copy=False)

+ # --- Final blend: exactly “blend two separate stretched images” ---
+ out = (1.0 - b) * linked_out + b * luma_out
  return np.clip(out, 0.0, 1.0).astype(np.float32, copy=False)

  # ----- NORMAL RGB PATH -----
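With the new luma_blend parameter, the luma-only path now produces two complete results, a linked RGB stretch and the luma-recombined stretch, and mixes them at the end. A minimal sketch of that final mix, using constant arrays in place of the two real stretch outputs:

import numpy as np

# b = 0 returns the linked stretch, b = 1 the luma-recombined stretch,
# anything in between is a straight per-pixel mix of the two finished images.
def blend_stretches(linked_out: np.ndarray, luma_out: np.ndarray, luma_blend: float) -> np.ndarray:
    b = float(np.clip(luma_blend, 0.0, 1.0))
    out = (1.0 - b) * linked_out + b * luma_out
    return np.clip(out, 0.0, 1.0).astype(np.float32, copy=False)

linked = np.full((2, 2, 3), 0.4, dtype=np.float32)   # stand-in for the linked result
luma = np.full((2, 2, 3), 0.8, dtype=np.float32)     # stand-in for the luma-only result
print(blend_stretches(linked, luma, 0.25)[0, 0])     # -> [0.5 0.5 0.5]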
setiastro/saspro/legacy/numba_utils.py
@@ -1,4 +1,4 @@
- #legacy.numba_utils.py
+ #src.setiastro.saspro.legacy.numba_utils.py
  import numpy as np
  from numba import njit, prange
  from numba.typed import List
@@ -407,43 +407,7 @@ def normalize_flat_cfa_inplace(flat2d: np.ndarray, pattern: str, *, combine_gree
  flat2d[flat2d == 0] = 1.0
  return flat2d

- @njit(parallel=True, fastmath=True)
- def apply_flat_division_numba_2d(image, master_flat, master_bias=None):
- """
- Mono version: image.shape == (H,W)
- """
- if master_bias is not None:
- master_flat = master_flat - master_bias
- image = image - master_bias
-
- median_flat = np.mean(master_flat)
- height, width = image.shape
-
- for y in prange(height):
- for x in range(width):
- image[y, x] /= (master_flat[y, x] / median_flat)
-
- return image
-
-
- @njit(parallel=True, fastmath=True)
- def apply_flat_division_numba_3d(image, master_flat, master_bias=None):
- """
- Color version: image.shape == (H,W,C)
- """
- if master_bias is not None:
- master_flat = master_flat - master_bias
- image = image - master_bias
-
- median_flat = np.mean(master_flat)
- height, width, channels = image.shape
-
- for y in prange(height):
- for x in range(width):
- for c in range(channels):
- image[y, x, c] /= (master_flat[y, x, c] / median_flat)

- return image

  @njit(parallel=True, fastmath=True)
  def _flat_div_2d(img, flat):
@@ -563,24 +527,37 @@ def apply_flat_division_numba_bayer_2d(image, master_flat, med4, pat_id):
  Bayer-aware mono division. image/master_flat are (H,W).
  med4 is [R,G1,G2,B] for that master_flat, pat_id in {0..3}.
  """
+ # parity index = (row&1)*2 + (col&1)
+ # med4 index order: 0=R, 1=G1, 2=G2, 3=B
+
+ # tables map parity_index -> med4 index
+ # parity_index: 0:(0,0) 1:(0,1) 2:(1,0) 3:(1,1)
+ if pat_id == 0: # RGGB: (0,0)R (0,1)G1 (1,0)G2 (1,1)B
+ t0, t1, t2, t3 = 0, 1, 2, 3
+ elif pat_id == 1: # BGGR: (0,0)B (0,1)G1 (1,0)G2 (1,1)R
+ t0, t1, t2, t3 = 3, 1, 2, 0
+ elif pat_id == 2: # GRBG: (0,0)G1 (0,1)R (1,0)B (1,1)G2
+ t0, t1, t2, t3 = 1, 0, 3, 2
+ else: # GBRG: (0,0)G1 (0,1)B (1,0)R (1,1)G2
+ t0, t1, t2, t3 = 1, 3, 0, 2
+
  H, W = image.shape
  for y in prange(H):
  y1 = y & 1
  for x in range(W):
  x1 = x & 1
-
- # map parity->plane index
- if pat_id == 0: # RGGB: (0,0)R (0,1)G1 (1,0)G2 (1,1)B
- pi = 0 if (y1==0 and x1==0) else 1 if (y1==0 and x1==1) else 2 if (y1==1 and x1==0) else 3
- elif pat_id == 1: # BGGR
- pi = 3 if (y1==1 and x1==1) else 1 if (y1==0 and x1==1) else 2 if (y1==1 and x1==0) else 0
- elif pat_id == 2: # GRBG
- pi = 1 if (y1==0 and x1==0) else 0 if (y1==0 and x1==1) else 3 if (y1==1 and x1==0) else 2
- else: # GBRG
- pi = 1 if (y1==0 and x1==0) else 3 if (y1==0 and x1==1) else 0 if (y1==1 and x1==0) else 2
+ p = (y1 << 1) | x1 # 0..3
+ if p == 0:
+ pi = t0
+ elif p == 1:
+ pi = t1
+ elif p == 2:
+ pi = t2
+ else:
+ pi = t3

  denom = master_flat[y, x] / med4[pi]
- if denom == 0.0 or not np.isfinite(denom):
+ if denom == 0.0 or (not np.isfinite(denom)):
  denom = 1.0
  image[y, x] /= denom
  return image
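The rewritten loop hoists the CFA-pattern branch out of the per-pixel loop: the pattern picks four table entries once, and pixel parity (row & 1, col & 1) then selects the matching med4 plane. A plain-Python sketch of the same lookup (no numba here), using the tables from the diff:

# pat_id convention from the diff: 0=RGGB, 1=BGGR, 2=GRBG, 3=GBRG;
# each tuple maps parity index 0..3 -> med4 index (0=R, 1=G1, 2=G2, 3=B).
PARITY_TABLES = {
    0: (0, 1, 2, 3),  # RGGB: (0,0)R (0,1)G1 (1,0)G2 (1,1)B
    1: (3, 1, 2, 0),  # BGGR
    2: (1, 0, 3, 2),  # GRBG
    3: (1, 3, 0, 2),  # GBRG
}

def plane_index(y: int, x: int, pat_id: int) -> int:
    p = ((y & 1) << 1) | (x & 1)      # parity index 0..3
    return PARITY_TABLES[pat_id][p]

# Top-left 2x2 block of a GRBG sensor should read G1, R, B, G2:
print([plane_index(y, x, 2) for y in (0, 1) for x in (0, 1)])  # -> [1, 0, 3, 2]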
setiastro/saspro/live_stacking.py
@@ -1328,7 +1328,23 @@ class LiveStackWindow(QDialog):
  QApplication.processEvents()
  finally:
  self._poll_busy = False
-
+
+ def _match_master_to_image(self, master: np.ndarray, img: np.ndarray) -> np.ndarray:
+ """
+ Coerce master (dark/flat) to match img dimensionality.
+ - If img is RGB (H,W,3) and master is mono (H,W), expand to (H,W,1).
+ - If img is mono (H,W) and master is RGB (H,W,3), collapse to mono via mean.
+ """
+ if master is None:
+ return None
+
+ if img.ndim == 3 and master.ndim == 2:
+ return master[..., None] # (H,W,1) broadcasts to (H,W,3)
+ if img.ndim == 2 and master.ndim == 3:
+ return master.mean(axis=2) # (H,W)
+ return master
+
+
  def process_frame(self, path):
  if self._should_stop():
  return
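The new helper relies on NumPy broadcasting: a mono master expanded to (H, W, 1) subtracts or divides cleanly against an (H, W, 3) frame, and an RGB master is averaged down when the incoming frame is mono. A standalone sketch of the same shape handling outside the class (the function name mirrors the method but is not the package API):

import numpy as np

def match_master_to_image(master, img):
    # Same shape rules as the helper above: expand mono masters for RGB frames,
    # collapse RGB masters for mono frames, otherwise pass the master through.
    if master is None:
        return None
    if img.ndim == 3 and master.ndim == 2:
        return master[..., None]      # (H, W, 1) broadcasts against (H, W, 3)
    if img.ndim == 2 and master.ndim == 3:
        return master.mean(axis=2)    # (H, W)
    return master

rgb_frame = np.ones((4, 4, 3), dtype=np.float32)
mono_dark = np.full((4, 4), 0.1, dtype=np.float32)
calibrated = rgb_frame - match_master_to_image(mono_dark, rgb_frame)
print(calibrated.shape, round(float(calibrated[0, 0, 0]), 3))  # (4, 4, 3) 0.9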
@@ -1422,12 +1438,16 @@

  # ——— 2b) CALIBRATION (once) ————————————————————————
  if self.master_dark is not None:
- img = img.astype(np.float32) - self.master_dark
+ md = self._match_master_to_image(self.master_dark, img).astype(np.float32, copy=False)
+ img = img.astype(np.float32, copy=False) - md
  # prefer per-filter flat if we’re in mono→color and have one
  if mono_key and mono_key in self.master_flats:
- img = apply_flat_division_numba(img, self.master_flats[mono_key])
+ mf = self._match_master_to_image(self.master_flats[mono_key], img).astype(np.float32, copy=False)
+ img = apply_flat_division_numba(img, mf)
  elif self.master_flat is not None:
- img = apply_flat_division_numba(img, self.master_flat)
+ mf = self._match_master_to_image(self.master_flat, img).astype(np.float32, copy=False)
+ img = apply_flat_division_numba(img, mf)
+

  if self._should_stop():
  return
setiastro/saspro/multiscale_decomp.py
@@ -120,37 +120,41 @@ def soft_threshold(x: np.ndarray, t: float):
  def apply_layer_ops(
  w: np.ndarray,
  bias_gain: float,
- thr_sigma: float, # threshold in units of σ
+ thr_sigma: float,
  amount: float,
  denoise_strength: float = 0.0,
  sigma: float | np.ndarray | None = None,
+ layer_index: int | None = None,
  *,
  mode: str = "μ–σ Thresholding",
  ):
  w2 = w

- # Normalize mode to something robust to label wording
  m = (mode or "").strip().lower()
  is_linear = m.startswith("linear")
-
- # --- Linear mode: strictly linear multiscale transform ---
  if is_linear:
- # Ignore thresholding and denoise; just apply gain
- if abs(bias_gain - 1.0) > 1e-6:
- return w * bias_gain
- return w
+ return w * bias_gain if abs(bias_gain - 1.0) > 1e-6 else w

- # --- μ–σ Thresholding mode (robust nonlinear) ---
  # 1) Noise reduction step (MMT-style NR)
  if denoise_strength > 0.0:
  if sigma is None:
  sigma = _robust_sigma(w2)
  sigma_f = float(sigma)
- # 3σ at denoise=1, scaled linearly
- t_dn = max(0.0, denoise_strength * 3.0 * sigma_f)
+
+ i = int(layer_index or 0)
+
+ # --- SMOOTH scaling option (pick ONE) ---
+ # Option A: linear growth (very controllable)
+ # scale = 1.0 + 0.75 * i
+
+ # Option B: sqrt growth of 2^i (gentle, "natural")
+ scale = (2.0 ** i) ** 0.5 # 1, 1.41, 2, 2.83, 4, ...
+
+ # Base: 3σ at denoise=1 for layer 0, increases by scale
+ t_dn = denoise_strength * 3.0 * scale * sigma_f
+
  if t_dn > 0.0:
  w_dn = soft_threshold(w2, t_dn)
- # Blend original vs denoised based on denoise_strength
  w2 = (1.0 - denoise_strength) * w2 + denoise_strength * w_dn

  # 2) Threshold in σ units + bias shaping
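The denoise threshold is no longer a fixed 3σ: it now grows with the wavelet layer index as scale = sqrt(2**i), so coarser layers get progressively stronger soft-thresholding. A quick standalone check of those numbers (denoise_strength and sigma set to 1.0 purely for illustration):

import math

# "Option B" scaling from the diff: threshold = denoise_strength * 3 * sqrt(2**i) * sigma.
def denoise_threshold(layer_index: int, denoise_strength: float = 1.0, sigma: float = 1.0) -> float:
    scale = math.sqrt(2.0 ** layer_index)   # 1, 1.41, 2, 2.83, 4, ...
    return denoise_strength * 3.0 * scale * sigma

print([round(denoise_threshold(i), 2) for i in range(5)])
# -> [3.0, 4.24, 6.0, 8.49, 12.0]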
@@ -158,7 +162,7 @@ def apply_layer_ops(
  if sigma is None:
  sigma = _robust_sigma(w2)
  sigma_f = float(sigma)
- t = thr_sigma * sigma_f # convert N·σ → absolute threshold
+ t = thr_sigma * sigma_f
  if t > 0.0:
  wt = soft_threshold(w2, t)
  w2 = (1.0 - amount) * w2 + amount * wt
@@ -167,7 +171,6 @@ def apply_layer_ops(
  w2 = w2 * bias_gain
  return w2

-
  def _robust_sigma(arr: np.ndarray) -> float:
  """
  Robust per-layer sigma estimate using MAD, fallback to std if needed.
@@ -455,7 +458,7 @@ class MultiscaleDecompDialog(QDialog):

  # --- Spin boxes ---
  self.spin_gain = QDoubleSpinBox()
- self.spin_gain.setRange(0.0, 3.0)
+ self.spin_gain.setRange(0.0, 10.0)
  self.spin_gain.setSingleStep(0.05)
  self.spin_gain.setValue(1.0)
  self.spin_gain.setToolTip(
@@ -491,7 +494,7 @@ class MultiscaleDecompDialog(QDialog):

  # --- Sliders (int ranges, mapped to spins) ---
  self.slider_gain = QSlider(Qt.Orientation.Horizontal)
- self.slider_gain.setRange(0, 300) # 0..3.00
+ self.slider_gain.setRange(0, 1000) # 0..10.00
  self.slider_gain.setToolTip(self.spin_gain.toolTip())

  self.slider_thr = QSlider(Qt.Orientation.Horizontal)
@@ -748,6 +751,9 @@ class MultiscaleDecompDialog(QDialog):
  cfg = self.cfgs[i]
  if not cfg.enabled:
  return i, np.zeros_like(w)
+
+ layer_sigma = self.base_sigma * (2 ** i)
+
  sigma = self._layer_noise[i] if self._layer_noise and i < len(self._layer_noise) else None
  out = apply_layer_ops(
  w,
@@ -756,6 +762,7 @@ class MultiscaleDecompDialog(QDialog):
  cfg.amount,
  cfg.denoise,
  sigma,
+ layer_index=i,
  mode=mode,
  )
  return i, out
@@ -1264,11 +1271,17 @@ class MultiscaleDecompDialog(QDialog):
  cfg = self.cfgs[i]
  if not cfg.enabled:
  return i, np.zeros_like(w)
+
+ layer_sigma = base_sigma * (2 ** i)
+
  return i, apply_layer_ops(
  w, cfg.bias_gain, cfg.thr, cfg.amount, cfg.denoise,
- layer_noise[i], mode=mode
+ layer_noise[i],
+ layer_index=i,
+ mode=mode
  )

+
  tuned = [None] * len(details)
  max_workers = min(os.cpu_count() or 4, len(details) or 1)
  with ThreadPoolExecutor(max_workers=max_workers) as ex: