setiastrosuitepro 1.7.3__py3-none-any.whl → 1.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of setiastrosuitepro might be problematic. Click here for more details.

Files changed (52) hide show
  1. setiastro/images/clonestamp.png +0 -0
  2. setiastro/saspro/__init__.py +15 -4
  3. setiastro/saspro/__main__.py +23 -5
  4. setiastro/saspro/_generated/build_info.py +2 -2
  5. setiastro/saspro/abe.py +4 -4
  6. setiastro/saspro/autostretch.py +29 -18
  7. setiastro/saspro/blemish_blaster.py +54 -14
  8. setiastro/saspro/clone_stamp.py +753 -0
  9. setiastro/saspro/gui/main_window.py +27 -6
  10. setiastro/saspro/gui/mixins/menu_mixin.py +1 -0
  11. setiastro/saspro/gui/mixins/toolbar_mixin.py +10 -15
  12. setiastro/saspro/legacy/numba_utils.py +301 -119
  13. setiastro/saspro/numba_utils.py +998 -270
  14. setiastro/saspro/ops/settings.py +6 -6
  15. setiastro/saspro/pixelmath.py +1 -1
  16. setiastro/saspro/planetprojection.py +310 -105
  17. setiastro/saspro/resources.py +2 -0
  18. setiastro/saspro/sfcc.py +14 -8
  19. setiastro/saspro/stacking_suite.py +413 -174
  20. setiastro/saspro/subwindow.py +28 -35
  21. setiastro/saspro/translations/all_source_strings.json +2 -2
  22. setiastro/saspro/translations/ar_translations.py +3 -3
  23. setiastro/saspro/translations/de_translations.py +2 -2
  24. setiastro/saspro/translations/es_translations.py +2 -2
  25. setiastro/saspro/translations/fr_translations.py +2 -2
  26. setiastro/saspro/translations/hi_translations.py +2 -2
  27. setiastro/saspro/translations/it_translations.py +2 -2
  28. setiastro/saspro/translations/ja_translations.py +2 -2
  29. setiastro/saspro/translations/pt_translations.py +2 -2
  30. setiastro/saspro/translations/ru_translations.py +2 -2
  31. setiastro/saspro/translations/saspro_ar.ts +2 -2
  32. setiastro/saspro/translations/saspro_de.ts +4 -4
  33. setiastro/saspro/translations/saspro_es.ts +2 -2
  34. setiastro/saspro/translations/saspro_fr.ts +2 -2
  35. setiastro/saspro/translations/saspro_hi.ts +2 -2
  36. setiastro/saspro/translations/saspro_it.ts +4 -4
  37. setiastro/saspro/translations/saspro_ja.ts +2 -2
  38. setiastro/saspro/translations/saspro_pt.ts +2 -2
  39. setiastro/saspro/translations/saspro_ru.ts +2 -2
  40. setiastro/saspro/translations/saspro_sw.ts +2 -2
  41. setiastro/saspro/translations/saspro_uk.ts +2 -2
  42. setiastro/saspro/translations/saspro_zh.ts +2 -2
  43. setiastro/saspro/translations/sw_translations.py +2 -2
  44. setiastro/saspro/translations/uk_translations.py +2 -2
  45. setiastro/saspro/translations/zh_translations.py +2 -2
  46. setiastro/saspro/window_shelf.py +62 -1
  47. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.5.dist-info}/METADATA +1 -1
  48. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.5.dist-info}/RECORD +52 -50
  49. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.5.dist-info}/entry_points.txt +1 -1
  50. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.5.dist-info}/WHEEL +0 -0
  51. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.5.dist-info}/licenses/LICENSE +0 -0
  52. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.5.dist-info}/licenses/license.txt +0 -0
@@ -5,7 +5,7 @@ from numba.typed import List
5
5
  import cv2
6
6
  import math
7
7
 
8
- @njit(parallel=True, fastmath=True)
8
+ @njit(parallel=True, fastmath=True, cache=True)
9
9
  def blend_add_numba(A, B, alpha):
10
10
  H, W, C = A.shape
11
11
  out = np.empty_like(A)
@@ -19,7 +19,7 @@ def blend_add_numba(A, B, alpha):
19
19
  out[y,x,c] = v
20
20
  return out
21
21
 
22
- @njit(parallel=True, fastmath=True)
22
+ @njit(parallel=True, fastmath=True, cache=True)
23
23
  def blend_subtract_numba(A, B, alpha):
24
24
  H, W, C = A.shape
25
25
  out = np.empty_like(A)
@@ -32,7 +32,7 @@ def blend_subtract_numba(A, B, alpha):
32
32
  out[y,x,c] = v
33
33
  return out
34
34
 
35
- @njit(parallel=True, fastmath=True)
35
+ @njit(parallel=True, fastmath=True, cache=True)
36
36
  def blend_multiply_numba(A, B, alpha):
37
37
  H, W, C = A.shape
38
38
  out = np.empty_like(A)
@@ -45,7 +45,7 @@ def blend_multiply_numba(A, B, alpha):
45
45
  out[y,x,c] = v
46
46
  return out
47
47
 
48
- @njit(parallel=True, fastmath=True)
48
+ @njit(parallel=True, fastmath=True, cache=True)
49
49
  def blend_divide_numba(A, B, alpha):
50
50
  H, W, C = A.shape
51
51
  out = np.empty_like(A)
@@ -66,7 +66,7 @@ def blend_divide_numba(A, B, alpha):
66
66
  out[y,x,c] = v
67
67
  return out
68
68
 
69
- @njit(parallel=True, fastmath=True)
69
+ @njit(parallel=True, fastmath=True, cache=True)
70
70
  def blend_screen_numba(A, B, alpha):
71
71
  H, W, C = A.shape
72
72
  out = np.empty_like(A)
@@ -83,7 +83,7 @@ def blend_screen_numba(A, B, alpha):
83
83
  out[y,x,c] = v
84
84
  return out
85
85
 
86
- @njit(parallel=True, fastmath=True)
86
+ @njit(parallel=True, fastmath=True, cache=True)
87
87
  def blend_overlay_numba(A, B, alpha):
88
88
  H, W, C = A.shape
89
89
  out = np.empty_like(A)
@@ -105,7 +105,7 @@ def blend_overlay_numba(A, B, alpha):
105
105
  out[y,x,c] = v
106
106
  return out
107
107
 
108
- @njit(parallel=True, fastmath=True)
108
+ @njit(parallel=True, fastmath=True, cache=True)
109
109
  def blend_difference_numba(A, B, alpha):
110
110
  H, W, C = A.shape
111
111
  out = np.empty_like(A)
@@ -123,7 +123,7 @@ def blend_difference_numba(A, B, alpha):
123
123
  out[y,x,c] = v
124
124
  return out
125
125
 
126
- @njit(parallel=True, fastmath=True)
126
+ @njit(parallel=True, fastmath=True, cache=True)
127
127
  def rescale_image_numba(image, factor):
128
128
  """
129
129
  Custom rescale function using bilinear interpolation optimized with numba.
@@ -169,7 +169,7 @@ def rescale_image_numba(image, factor):
169
169
  image[y1, x1, c] * dx * dy)
170
170
  return output
171
171
 
172
- @njit(parallel=True, fastmath=True)
172
+ @njit(parallel=True, fastmath=True, cache=True)
173
173
  def bin2x2_numba(image):
174
174
  """
175
175
  Downsample the image by 2×2 via simple averaging (“integer binning”).
@@ -205,7 +205,7 @@ def bin2x2_numba(image):
205
205
 
206
206
  return out
207
207
 
208
- @njit(parallel=True, fastmath=True)
208
+ @njit(parallel=True, fastmath=True, cache=True)
209
209
  def flip_horizontal_numba(image):
210
210
  """
211
211
  Flips an image horizontally using Numba JIT.
@@ -228,7 +228,7 @@ def flip_horizontal_numba(image):
228
228
  return output
229
229
 
230
230
 
231
- @njit(parallel=True, fastmath=True)
231
+ @njit(parallel=True, fastmath=True, cache=True)
232
232
  def flip_vertical_numba(image):
233
233
  """
234
234
  Flips an image vertically using Numba JIT.
@@ -251,7 +251,7 @@ def flip_vertical_numba(image):
251
251
  return output
252
252
 
253
253
 
254
- @njit(parallel=True, fastmath=True)
254
+ @njit(parallel=True, fastmath=True, cache=True)
255
255
  def rotate_90_clockwise_numba(image):
256
256
  """
257
257
  Rotates the image 90 degrees clockwise.
@@ -274,7 +274,7 @@ def rotate_90_clockwise_numba(image):
274
274
  return output
275
275
 
276
276
 
277
- @njit(parallel=True, fastmath=True)
277
+ @njit(parallel=True, fastmath=True, cache=True)
278
278
  def rotate_90_counterclockwise_numba(image):
279
279
  """
280
280
  Rotates the image 90 degrees counterclockwise.
@@ -297,7 +297,7 @@ def rotate_90_counterclockwise_numba(image):
297
297
  return output
298
298
 
299
299
 
300
- @njit(parallel=True, fastmath=True)
300
+ @njit(parallel=True, fastmath=True, cache=True)
301
301
  def invert_image_numba(image):
302
302
  """
303
303
  Inverts an image (1 - pixel value) using Numba JIT.
@@ -585,7 +585,7 @@ def subtract_dark_3d(frames, dark_frame):
585
585
  return result
586
586
 
587
587
 
588
- @njit(parallel=True)
588
+ @njit(parallel=True, cache=True)
589
589
  def subtract_dark_4d(frames, dark_frame):
590
590
  """
591
591
  For color stack:
@@ -626,7 +626,7 @@ from numba import njit, prange
626
626
  # Windsorized Sigma Clipping (Weighted, Iterative)
627
627
  # -------------------------------
628
628
 
629
- @njit(parallel=True, fastmath=True)
629
+ @njit(parallel=True, fastmath=True, cache=True)
630
630
  def windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
631
631
  """
632
632
  Iterative Weighted Windsorized Sigma Clipping for a 3D mono stack.
@@ -685,7 +685,7 @@ def windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower=2.5, upper=2.5
685
685
  return clipped, rej_mask
686
686
 
687
687
 
688
- @njit(parallel=True, fastmath=True)
688
+ @njit(parallel=True, fastmath=True, cache=True)
689
689
  def windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
690
690
  """
691
691
  Iterative Weighted Windsorized Sigma Clipping for a 4D color stack.
@@ -760,7 +760,7 @@ def windsorized_sigma_clip_weighted(stack, weights, lower=2.5, upper=2.5, iterat
760
760
  # Kappa-Sigma Clipping (Weighted)
761
761
  # -------------------------------
762
762
 
763
- @njit(parallel=True, fastmath=True)
763
+ @njit(parallel=True, fastmath=True, cache=True)
764
764
  def kappa_sigma_clip_weighted_3d(stack, weights, kappa=2.5, iterations=3):
765
765
  """
766
766
  Kappa-Sigma Clipping for a 3D mono stack.
@@ -824,7 +824,7 @@ def kappa_sigma_clip_weighted_3d(stack, weights, kappa=2.5, iterations=3):
824
824
  return clipped, rej_mask
825
825
 
826
826
 
827
- @njit(parallel=True, fastmath=True)
827
+ @njit(parallel=True, fastmath=True, cache=True)
828
828
  def kappa_sigma_clip_weighted_4d(stack, weights, kappa=2.5, iterations=3):
829
829
  """
830
830
  Kappa-Sigma Clipping for a 4D color stack.
@@ -904,7 +904,7 @@ def kappa_sigma_clip_weighted(stack, weights, kappa=2.5, iterations=3):
904
904
  # Trimmed Mean (Weighted)
905
905
  # -------------------------------
906
906
 
907
- @njit(parallel=True, fastmath=True)
907
+ @njit(parallel=True, fastmath=True, cache=True)
908
908
  def trimmed_mean_weighted_3d(stack, weights, trim_fraction=0.1):
909
909
  """
910
910
  Trimmed Mean for a 3D mono stack.
@@ -975,7 +975,7 @@ def trimmed_mean_weighted_3d(stack, weights, trim_fraction=0.1):
975
975
  return clipped, rej_mask
976
976
 
977
977
 
978
- @njit(parallel=True, fastmath=True)
978
+ @njit(parallel=True, fastmath=True, cache=True)
979
979
  def trimmed_mean_weighted_4d(stack, weights, trim_fraction=0.1):
980
980
  """
981
981
  Trimmed Mean for a 4D color stack.
@@ -1057,7 +1057,7 @@ def trimmed_mean_weighted(stack, weights, trim_fraction=0.1):
1057
1057
  # Extreme Studentized Deviate (ESD) Clipping (Weighted)
1058
1058
  # -------------------------------
1059
1059
 
1060
- @njit(parallel=True, fastmath=True)
1060
+ @njit(parallel=True, fastmath=True, cache=True)
1061
1061
  def esd_clip_weighted_3d(stack, weights, threshold=3.0):
1062
1062
  """
1063
1063
  ESD Clipping for a 3D mono stack.
@@ -1121,7 +1121,7 @@ def esd_clip_weighted_3d(stack, weights, threshold=3.0):
1121
1121
  return clipped, rej_mask
1122
1122
 
1123
1123
 
1124
- @njit(parallel=True, fastmath=True)
1124
+ @njit(parallel=True, fastmath=True, cache=True)
1125
1125
  def esd_clip_weighted_4d(stack, weights, threshold=3.0):
1126
1126
  """
1127
1127
  ESD Clipping for a 4D color stack.
@@ -1201,7 +1201,7 @@ def esd_clip_weighted(stack, weights, threshold=3.0):
1201
1201
  # Biweight Location (Weighted)
1202
1202
  # -------------------------------
1203
1203
 
1204
- @njit(parallel=True, fastmath=True)
1204
+ @njit(parallel=True, fastmath=True, cache=True)
1205
1205
  def biweight_location_weighted_3d(stack, weights, tuning_constant=6.0):
1206
1206
  """
1207
1207
  Biweight Location for a 3D mono stack.
@@ -1265,7 +1265,7 @@ def biweight_location_weighted_3d(stack, weights, tuning_constant=6.0):
1265
1265
  return clipped, rej_mask
1266
1266
 
1267
1267
 
1268
- @njit(parallel=True, fastmath=True)
1268
+ @njit(parallel=True, fastmath=True, cache=True)
1269
1269
  def biweight_location_weighted_4d(stack, weights, tuning_constant=6.0):
1270
1270
  """
1271
1271
  Biweight Location for a 4D color stack.
@@ -1344,7 +1344,7 @@ def biweight_location_weighted(stack, weights, tuning_constant=6.0):
1344
1344
  # Modified Z-Score Clipping (Weighted)
1345
1345
  # -------------------------------
1346
1346
 
1347
- @njit(parallel=True, fastmath=True)
1347
+ @njit(parallel=True, fastmath=True, cache=True)
1348
1348
  def modified_zscore_clip_weighted_3d(stack, weights, threshold=3.5):
1349
1349
  """
1350
1350
  Modified Z-Score Clipping for a 3D mono stack.
@@ -1407,7 +1407,7 @@ def modified_zscore_clip_weighted_3d(stack, weights, threshold=3.5):
1407
1407
  return clipped, rej_mask
1408
1408
 
1409
1409
 
1410
- @njit(parallel=True, fastmath=True)
1410
+ @njit(parallel=True, fastmath=True, cache=True)
1411
1411
  def modified_zscore_clip_weighted_4d(stack, weights, threshold=3.5):
1412
1412
  """
1413
1413
  Modified Z-Score Clipping for a 4D color stack.
@@ -1487,7 +1487,7 @@ def modified_zscore_clip_weighted(stack, weights, threshold=3.5):
1487
1487
  # Windsorized Sigma Clipping (Non-weighted)
1488
1488
  # -------------------------------
1489
1489
 
1490
- @njit(parallel=True, fastmath=True)
1490
+ @njit(parallel=True, fastmath=True, cache=True)
1491
1491
  def windsorized_sigma_clip_3d(stack, lower=2.5, upper=2.5):
1492
1492
  """
1493
1493
  Windsorized Sigma Clipping for a 3D mono stack (non-weighted).
@@ -1516,7 +1516,7 @@ def windsorized_sigma_clip_3d(stack, lower=2.5, upper=2.5):
1516
1516
  return clipped, rej_mask
1517
1517
 
1518
1518
 
1519
- @njit(parallel=True, fastmath=True)
1519
+ @njit(parallel=True, fastmath=True, cache=True)
1520
1520
  def windsorized_sigma_clip_4d(stack, lower=2.5, upper=2.5):
1521
1521
  """
1522
1522
  Windsorized Sigma Clipping for a 4D color stack (non-weighted).
@@ -1569,7 +1569,7 @@ def max_value_stack(stack, weights=None):
1569
1569
  rej_mask = np.zeros(stack.shape, dtype=bool)
1570
1570
  return clipped, rej_mask
1571
1571
 
1572
- @njit(parallel=True)
1572
+ @njit(parallel=True, cache=True)
1573
1573
  def subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal):
1574
1574
  """
1575
1575
  For mono stack:
@@ -1594,7 +1594,7 @@ def subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal):
1594
1594
 
1595
1595
  return result
1596
1596
 
1597
- @njit(parallel=True)
1597
+ @njit(parallel=True, cache=True)
1598
1598
  def subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal):
1599
1599
  """
1600
1600
  For color stack:
@@ -1654,8 +1654,7 @@ def parallel_measure_frames(images_py):
1654
1654
  a = [x[:, :, None] if x.ndim == 2 else x for x in a]
1655
1655
  stack = np.ascontiguousarray(np.stack(a, axis=0)) # (N,H,W,C)
1656
1656
  return _parallel_measure_frames_stack(stack)
1657
-
1658
- @njit(fastmath=True)
1657
+ @njit(fastmath=True, cache=True)
1659
1658
  def fast_mad(image):
1660
1659
  """ Computes the Median Absolute Deviation (MAD) as a robust noise estimator. """
1661
1660
  flat_image = image.ravel() # ✅ Flatten the 2D array into 1D
@@ -1665,7 +1664,7 @@ def fast_mad(image):
1665
1664
 
1666
1665
 
1667
1666
 
1668
- @njit(fastmath=True)
1667
+ @njit(fastmath=True, cache=True)
1669
1668
  def compute_snr(image):
1670
1669
  """ Computes the Signal-to-Noise Ratio (SNR) using fast Numba std. """
1671
1670
  mean_signal = np.mean(image)
@@ -1675,7 +1674,7 @@ def compute_snr(image):
1675
1674
 
1676
1675
 
1677
1676
 
1678
- @njit(fastmath=True)
1677
+ @njit(fastmath=True, cache=True)
1679
1678
  def compute_noise(image):
1680
1679
  """ Estimates noise using Median Absolute Deviation (MAD). """
1681
1680
  return fast_mad(image)
@@ -1776,30 +1775,33 @@ def compute_star_count_fast_preview(preview_2d: np.ndarray) -> tuple[int, float]
1776
1775
  tiny = _downsample_for_stars(preview_2d, factor=4) # try 4–8 depending on your sensor
1777
1776
  return fast_star_count_lite(tiny, sample_stride=8, localmax_k=3, thr_sigma=4.0, max_ecc_samples=120)
1778
1777
 
1779
-
1780
-
1781
1778
  def compute_star_count(image):
1782
- """Fast star detection with robust pre-stretch for linear data."""
1783
1779
  return fast_star_count(image)
1784
1780
 
1785
1781
  def fast_star_count(
1786
1782
  image,
1787
- blur_size=None, # adaptive if None
1783
+ blur_size=None,
1788
1784
  threshold_factor=0.8,
1789
- min_area=None, # adaptive if None
1790
- max_area=None, # adaptive if None
1785
+ min_area=None,
1786
+ max_area=None,
1791
1787
  *,
1792
- gamma=0.45, # <1 brightens faint signal; 0.35–0.55 is a good range
1793
- p_lo=0.1, # robust low percentile for stretch
1794
- p_hi=99.8 # robust high percentile for stretch
1788
+ # robust stretch controls (file1)
1789
+ stretch=True,
1790
+ gamma=0.45,
1791
+ p_lo=0.1,
1792
+ p_hi=99.8,
1793
+ # morphology behavior
1794
+ morph_open="auto", # "auto" | True | False
1795
1795
  ):
1796
1796
  """
1797
- Estimate star count + avg eccentricity from a 2D float/uint8 image.
1798
- Now does robust percentile stretch + gamma in float BEFORE 8-bit/Otsu.
1797
+ Returns (star_count, avg_ecc).
1798
+
1799
+ stretch=True (default): robust for linear astro images (percentile stretch + gamma)
1800
+ stretch=False: simple min/max normalize (legacy behavior)
1799
1801
  """
1800
- # 1) Ensure 2D grayscale (stay float32)
1802
+
1803
+ # 1) grayscale float32
1801
1804
  if image.ndim == 3:
1802
- # RGB -> luma
1803
1805
  r, g, b = image[..., 0], image[..., 1], image[..., 2]
1804
1806
  img = (0.2126 * r + 0.7152 * g + 0.0722 * b).astype(np.float32, copy=False)
1805
1807
  else:
@@ -1808,48 +1810,61 @@ def fast_star_count(
1808
1810
  H, W = img.shape[:2]
1809
1811
  short_side = max(1, min(H, W))
1810
1812
 
1811
- # Adaptive params
1813
+ # 2) adaptive params (file1 style)
1812
1814
  if blur_size is None:
1813
1815
  k = max(3, int(round(short_side / 80)))
1814
1816
  blur_size = k if (k % 2 == 1) else (k + 1)
1817
+
1815
1818
  if min_area is None:
1816
1819
  min_area = 1
1820
+
1817
1821
  if max_area is None:
1818
1822
  max_area = max(100, int(0.01 * H * W))
1819
1823
 
1820
- # 2) Robust percentile stretch in float (no 8-bit yet)
1821
- # This lifts the sky background and pulls faint stars up before thresholding.
1822
- lo = float(np.percentile(img, p_lo))
1823
- hi = float(np.percentile(img, p_hi))
1824
- if not (hi > lo):
1825
- lo, hi = float(img.min()), float(img.max())
1824
+ # 3) build 8-bit working image
1825
+ if stretch:
1826
+ lo = float(np.percentile(img, p_lo))
1827
+ hi = float(np.percentile(img, p_hi))
1826
1828
  if not (hi > lo):
1827
- return 0, 0.0
1829
+ lo, hi = float(img.min()), float(img.max())
1830
+ if not (hi > lo):
1831
+ return 0, 0.0
1828
1832
 
1829
- norm = (img - lo) / max(1e-8, (hi - lo))
1830
- norm = np.clip(norm, 0.0, 1.0)
1833
+ norm = (img - lo) / max(1e-8, (hi - lo))
1834
+ norm = np.clip(norm, 0.0, 1.0)
1831
1835
 
1832
- # 3) Gamma (<1 brightens low end)
1833
- if gamma and gamma > 0:
1834
- norm = np.power(norm, gamma, dtype=np.float32)
1836
+ if gamma and gamma > 0:
1837
+ norm = np.power(norm, gamma, dtype=np.float32)
1835
1838
 
1836
- # 4) Convert to 8-bit ONLY after stretch/gamma (preserves faint structure)
1837
- image_8u = (norm * 255.0).astype(np.uint8)
1839
+ image_8u = (norm * 255.0).astype(np.uint8)
1840
+ else:
1841
+ img_min = float(img.min())
1842
+ img_max = float(img.max())
1843
+ if img_max > img_min:
1844
+ image_8u = (255.0 * (img - img_min) / (img_max - img_min)).astype(np.uint8)
1845
+ else:
1846
+ return 0, 0.0
1838
1847
 
1839
- # 5) Blur + subtract (unsharp-ish)
1848
+ # 4) blur + subtract
1840
1849
  blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
1841
1850
  sub = cv2.absdiff(image_8u, blurred)
1842
1851
 
1843
- # 6) Otsu + threshold_factor
1852
+ # 5) Otsu + threshold_factor
1844
1853
  otsu, _ = cv2.threshold(sub, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
1845
1854
  thr = max(2, int(otsu * threshold_factor))
1846
1855
  _, mask = cv2.threshold(sub, thr, 255, cv2.THRESH_BINARY)
1847
1856
 
1848
- # 7) Morph open *only* on larger frames (tiny frames lose stars otherwise)
1849
- if short_side >= 600:
1857
+ # 6) morphology
1858
+ do_morph = False
1859
+ if morph_open == "auto":
1860
+ do_morph = short_side >= 600
1861
+ elif morph_open is True:
1862
+ do_morph = True
1863
+
1864
+ if do_morph:
1850
1865
  mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((2, 2), np.uint8))
1851
1866
 
1852
- # 8) Contours area filter → eccentricity
1867
+ # 7) contours -> ellipse ecc
1853
1868
  contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1854
1869
 
1855
1870
  star_count = 0
@@ -1861,23 +1876,22 @@ def fast_star_count(
1861
1876
  if len(c) < 5:
1862
1877
  continue
1863
1878
  (_, _), (a, b), _ = cv2.fitEllipse(c)
1864
- if b > a: a, b = b, a
1865
- if a > 0:
1866
- e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a)))
1867
- else:
1868
- e = 0.0
1879
+ if b > a:
1880
+ a, b = b, a
1881
+ e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a))) if a > 0 else 0.0
1869
1882
  ecc_values.append(e)
1870
1883
  star_count += 1
1871
1884
 
1872
- # 9) Gentle fallback if too few detections: lower threshold & smaller blur
1885
+ # 8) fallback if too few
1873
1886
  if star_count < 5:
1874
1887
  k2 = max(3, (blur_size // 2) | 1)
1875
1888
  blurred2 = cv2.GaussianBlur(image_8u, (k2, k2), 0)
1876
1889
  sub2 = cv2.absdiff(image_8u, blurred2)
1877
1890
  otsu2, _ = cv2.threshold(sub2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
1878
- thr2 = max(2, int(otsu2 * 0.6)) # more permissive
1891
+ thr2 = max(2, int(otsu2 * 0.6))
1879
1892
  _, mask2 = cv2.threshold(sub2, thr2, 255, cv2.THRESH_BINARY)
1880
1893
  contours2, _ = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1894
+
1881
1895
  star_count = 0
1882
1896
  ecc_values = []
1883
1897
  for c in contours2:
@@ -1887,7 +1901,8 @@ def fast_star_count(
1887
1901
  if len(c) < 5:
1888
1902
  continue
1889
1903
  (_, _), (a, b), _ = cv2.fitEllipse(c)
1890
- if b > a: a, b = b, a
1904
+ if b > a:
1905
+ a, b = b, a
1891
1906
  e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a))) if a > 0 else 0.0
1892
1907
  ecc_values.append(e)
1893
1908
  star_count += 1
@@ -1895,7 +1910,8 @@ def fast_star_count(
1895
1910
  avg_ecc = float(np.mean(ecc_values)) if star_count > 0 else 0.0
1896
1911
  return star_count, avg_ecc
1897
1912
 
1898
- @njit(parallel=True, fastmath=True)
1913
+
1914
+ @njit(parallel=True, fastmath=True, cache=True)
1899
1915
  def normalize_images_3d(stack, ref_median):
1900
1916
  """
1901
1917
  Normalizes each frame in a 3D mono stack (F,H,W)
@@ -1918,7 +1934,7 @@ def normalize_images_3d(stack, ref_median):
1918
1934
 
1919
1935
  return normalized_stack
1920
1936
 
1921
- @njit(parallel=True, fastmath=True)
1937
+ @njit(parallel=True, fastmath=True, cache=True)
1922
1938
  def normalize_images_4d(stack, ref_median):
1923
1939
  """
1924
1940
  Normalizes each frame in a 4D color stack (F,H,W,C)
@@ -1961,7 +1977,7 @@ def normalize_images(stack, ref_median):
1961
1977
  else:
1962
1978
  raise ValueError(f"normalize_images: stack must be 3D or 4D, got shape {stack.shape}")
1963
1979
 
1964
- @njit(parallel=True, fastmath=True)
1980
+ @njit(parallel=True, fastmath=True, cache=True)
1965
1981
  def _bilinear_interpolate_numba(out):
1966
1982
  H, W, C = out.shape
1967
1983
  for c in range(C):
@@ -2084,7 +2100,7 @@ def _edge_aware_interpolate_numba(out):
2084
2100
  # These njit functions assume the raw image is arranged in a Bayer pattern
2085
2101
  # and that we want a full (H,W,3) output.
2086
2102
 
2087
- @njit(parallel=True, fastmath=True)
2103
+ @njit(parallel=True, fastmath=True, cache=True)
2088
2104
  def debayer_RGGB_fullres_fast(image, interpolate=True):
2089
2105
  H, W = image.shape
2090
2106
  out = np.zeros((H, W, 3), dtype=image.dtype)
@@ -2100,7 +2116,7 @@ def debayer_RGGB_fullres_fast(image, interpolate=True):
2100
2116
  _edge_aware_interpolate_numba(out)
2101
2117
  return out
2102
2118
 
2103
- @njit(parallel=True, fastmath=True)
2119
+ @njit(parallel=True, fastmath=True, cache=True)
2104
2120
  def debayer_BGGR_fullres_fast(image, interpolate=True):
2105
2121
  H, W = image.shape
2106
2122
  out = np.zeros((H, W, 3), dtype=image.dtype)
@@ -2116,7 +2132,7 @@ def debayer_BGGR_fullres_fast(image, interpolate=True):
2116
2132
  _edge_aware_interpolate_numba(out)
2117
2133
  return out
2118
2134
 
2119
- @njit(parallel=True, fastmath=True)
2135
+ @njit(parallel=True, fastmath=True, cache=True)
2120
2136
  def debayer_GRBG_fullres_fast(image, interpolate=True):
2121
2137
  H, W = image.shape
2122
2138
  out = np.zeros((H, W, 3), dtype=image.dtype)
@@ -2132,7 +2148,7 @@ def debayer_GRBG_fullres_fast(image, interpolate=True):
2132
2148
  _edge_aware_interpolate_numba(out)
2133
2149
  return out
2134
2150
 
2135
- @njit(parallel=True, fastmath=True)
2151
+ @njit(parallel=True, fastmath=True, cache=True)
2136
2152
  def debayer_GBRG_fullres_fast(image, interpolate=True):
2137
2153
  H, W = image.shape
2138
2154
  out = np.zeros((H, W, 3), dtype=image.dtype)
@@ -2148,43 +2164,39 @@ def debayer_GBRG_fullres_fast(image, interpolate=True):
2148
2164
  _edge_aware_interpolate_numba(out)
2149
2165
  return out
2150
2166
 
2151
- # === Python-Level Dispatch Function ===
2152
- # Since Numba cannot easily compare strings in nopython mode,
2153
- # we do the if/elif check here in Python and then call the appropriate njit function.
2154
2167
 
2155
2168
  def debayer_fits_fast(image_data, bayer_pattern, cfa_drizzle=False, method="edge"):
2156
2169
  bp = (bayer_pattern or "").upper()
2157
2170
  interpolate = not cfa_drizzle
2158
2171
 
2159
- # 1) lay down the known samples per CFA
2160
- if bp == 'RGGB':
2172
+ # 1) lay down samples; skip interpolate here so we can select method later
2173
+ if bp == "RGGB":
2161
2174
  out = debayer_RGGB_fullres_fast(image_data, interpolate=False)
2162
- elif bp == 'BGGR':
2175
+ elif bp == "BGGR":
2163
2176
  out = debayer_BGGR_fullres_fast(image_data, interpolate=False)
2164
- elif bp == 'GRBG':
2177
+ elif bp == "GRBG":
2165
2178
  out = debayer_GRBG_fullres_fast(image_data, interpolate=False)
2166
- elif bp == 'GBRG':
2179
+ elif bp == "GBRG":
2167
2180
  out = debayer_GBRG_fullres_fast(image_data, interpolate=False)
2168
2181
  else:
2169
2182
  raise ValueError(f"Unsupported Bayer pattern: {bayer_pattern}")
2170
2183
 
2171
- # 2) perform interpolation unless doing CFA-drizzle
2184
+ # 2) interpolate unless CFA drizzle
2172
2185
  if interpolate:
2173
2186
  m = (method or "edge").lower()
2174
- if m == "edge":
2175
- _edge_aware_interpolate_numba(out)
2176
- elif m == "bilinear":
2187
+ if m == "bilinear":
2177
2188
  _bilinear_interpolate_numba(out)
2178
2189
  else:
2179
- # fallback to edge-aware if unknown
2180
2190
  _edge_aware_interpolate_numba(out)
2181
2191
 
2182
2192
  return out
2183
2193
 
2194
+
2184
2195
  def debayer_raw_fast(raw_image_data, bayer_pattern="RGGB", cfa_drizzle=False, method="edge"):
2185
2196
  return debayer_fits_fast(raw_image_data, bayer_pattern, cfa_drizzle=cfa_drizzle, method=method)
2186
2197
 
2187
- @njit(parallel=True, fastmath=True)
2198
+
2199
+ @njit(parallel=True, fastmath=True, cache=True)
2188
2200
  def applyPixelMath_numba(image_array, amount):
2189
2201
  factor = 3 ** amount
2190
2202
  denom_factor = 3 ** amount - 1
@@ -2199,7 +2211,7 @@ def applyPixelMath_numba(image_array, amount):
2199
2211
 
2200
2212
  return output
2201
2213
 
2202
- @njit(parallel=True, fastmath=True)
2214
+ @njit(parallel=True, fastmath=True, cache=True)
2203
2215
  def adjust_saturation_numba(image_array, saturation_factor):
2204
2216
  height, width, channels = image_array.shape
2205
2217
  output = np.empty_like(image_array, dtype=np.float32)
@@ -2261,7 +2273,7 @@ def adjust_saturation_numba(image_array, saturation_factor):
2261
2273
 
2262
2274
 
2263
2275
 
2264
- @njit(parallel=True, fastmath=True)
2276
+ @njit(parallel=True, fastmath=True, cache=True)
2265
2277
  def applySCNR_numba(image_array):
2266
2278
  height, width, _ = image_array.shape
2267
2279
  output = np.empty_like(image_array, dtype=np.float32)
@@ -2298,7 +2310,7 @@ _M_xyz2rgb = np.array([
2298
2310
 
2299
2311
 
2300
2312
 
2301
- @njit(parallel=True, fastmath=True)
2313
+ @njit(parallel=True, fastmath=True, cache=True)
2302
2314
  def apply_lut_gray(image_in, lut):
2303
2315
  """
2304
2316
  Numba-accelerated application of 'lut' to a single-channel image_in in [0..1].
@@ -2318,7 +2330,7 @@ def apply_lut_gray(image_in, lut):
2318
2330
 
2319
2331
  return out
2320
2332
 
2321
- @njit(parallel=True, fastmath=True)
2333
+ @njit(parallel=True, fastmath=True, cache=True)
2322
2334
  def apply_lut_color(image_in, lut):
2323
2335
  """
2324
2336
  Numba-accelerated application of 'lut' to a 3-channel image_in in [0..1].
@@ -2339,7 +2351,7 @@ def apply_lut_color(image_in, lut):
2339
2351
 
2340
2352
  return out
2341
2353
 
2342
- @njit(parallel=True, fastmath=True)
2354
+ @njit(parallel=True, fastmath=True, cache=True)
2343
2355
  def apply_lut_mono_inplace(array2d, lut):
2344
2356
  """
2345
2357
  In-place LUT application on a single-channel 2D array in [0..1].
@@ -2357,7 +2369,7 @@ def apply_lut_mono_inplace(array2d, lut):
2357
2369
  idx = size_lut
2358
2370
  array2d[y, x] = lut[idx]
2359
2371
 
2360
- @njit(parallel=True, fastmath=True)
2372
+ @njit(parallel=True, fastmath=True, cache=True)
2361
2373
  def apply_lut_color_inplace(array3d, lut):
2362
2374
  """
2363
2375
  In-place LUT application on a 3-channel array in [0..1].
@@ -2376,7 +2388,7 @@ def apply_lut_color_inplace(array3d, lut):
2376
2388
  idx = size_lut
2377
2389
  array3d[y, x, c] = lut[idx]
2378
2390
 
2379
- @njit(parallel=True, fastmath=True)
2391
+ @njit(parallel=True, fastmath=True, cache=True)
2380
2392
  def rgb_to_xyz_numba(rgb):
2381
2393
  """
2382
2394
  Convert an image from sRGB to XYZ (D65).
@@ -2399,7 +2411,7 @@ def rgb_to_xyz_numba(rgb):
2399
2411
  out[y, x, 2] = Z
2400
2412
  return out
2401
2413
 
2402
- @njit(parallel=True, fastmath=True)
2414
+ @njit(parallel=True, fastmath=True, cache=True)
2403
2415
  def xyz_to_rgb_numba(xyz):
2404
2416
  """
2405
2417
  Convert an image from XYZ (D65) to sRGB.
@@ -2429,7 +2441,7 @@ def xyz_to_rgb_numba(xyz):
2429
2441
  out[y, x, 2] = b
2430
2442
  return out
2431
2443
 
2432
- @njit
2444
+ @njit(cache=True)
2433
2445
  def f_lab_numba(t):
2434
2446
  delta = 6/29
2435
2447
  out = np.empty_like(t, dtype=np.float32)
@@ -2441,7 +2453,7 @@ def f_lab_numba(t):
2441
2453
  out.flat[i] = val/(3*delta*delta) + (4/29)
2442
2454
  return out
2443
2455
 
2444
- @njit(parallel=True, fastmath=True)
2456
+ @njit(parallel=True, fastmath=True, cache=True)
2445
2457
  def xyz_to_lab_numba(xyz):
2446
2458
  """
2447
2459
  xyz => shape(H,W,3), in D65.
@@ -2465,7 +2477,7 @@ def xyz_to_lab_numba(xyz):
2465
2477
  out[y, x, 2] = b
2466
2478
  return out
2467
2479
 
2468
- @njit(parallel=True, fastmath=True)
2480
+ @njit(parallel=True, fastmath=True, cache=True)
2469
2481
  def lab_to_xyz_numba(lab):
2470
2482
  """
2471
2483
  lab => shape(H,W,3): L in [0..100], a,b in ~[-128..127].
@@ -2504,7 +2516,7 @@ def lab_to_xyz_numba(lab):
2504
2516
  out[y, x, 2] = Z
2505
2517
  return out
2506
2518
 
2507
- @njit(parallel=True, fastmath=True)
2519
+ @njit(parallel=True, fastmath=True, cache=True)
2508
2520
  def rgb_to_hsv_numba(rgb):
2509
2521
  H, W, _ = rgb.shape
2510
2522
  out = np.empty((H,W,3), dtype=np.float32)
@@ -2535,7 +2547,7 @@ def rgb_to_hsv_numba(rgb):
2535
2547
  out[y,x,2] = v
2536
2548
  return out
2537
2549
 
2538
- @njit(parallel=True, fastmath=True)
2550
+ @njit(parallel=True, fastmath=True, cache=True)
2539
2551
  def hsv_to_rgb_numba(hsv):
2540
2552
  H, W, _ = hsv.shape
2541
2553
  out = np.empty((H,W,3), dtype=np.float32)
@@ -2568,7 +2580,7 @@ def hsv_to_rgb_numba(hsv):
2568
2580
  out[y,x,2] = (b + m)
2569
2581
  return out
2570
2582
 
2571
- @njit(parallel=True, fastmath=True)
2583
+ @njit(parallel=True, fastmath=True, cache=True)
2572
2584
  def _cosmetic_correction_core(src, dst, H, W, C,
2573
2585
  hot_sigma, cold_sigma,
2574
2586
  star_mean_ratio, # e.g. 0.18..0.30
@@ -2767,6 +2779,102 @@ def evaluate_polynomial(H: int, W: int, coeffs: np.ndarray, degree: int) -> np.n
2767
2779
  A_full = build_poly_terms(xx.ravel(), yy.ravel(), degree)
2768
2780
  return (A_full @ coeffs).reshape(H, W)
2769
2781
 
2782
+
2783
@njit(parallel=True, fastmath=True, cache=True)
def numba_mono_final_formula(rescaled, median_rescaled, target_median):
    """
    Apply the final stretch formula to values that were already rescaled.

    rescaled[y, x]  = (original[y, x] - black_point) / (1 - black_point)
    median_rescaled = median(rescaled)

    out_val = ((median_rescaled - 1) * target_median * r) /
              ( median_rescaled*(target_median + r - 1) - target_median*r )
    """
    height, width = rescaled.shape
    result = np.empty_like(rescaled)

    for row in prange(height):
        for col in range(width):
            pix = rescaled[row, col]
            top = (median_rescaled - 1.0) * target_median * pix
            bottom = median_rescaled * (target_median + pix - 1.0) - target_median * pix
            # Guard against division by (near-)zero denominators.
            if np.abs(bottom) < 1e-12:
                bottom = 1e-12
            result[row, col] = top / bottom

    return result
2807
+
2808
@njit(fastmath=True, cache=True)
def drizzle_deposit_numba_naive(image_data, affine_2x3, drizzle_buffer, coverage_buffer, scale, weight):
    """
    Naive drizzle deposit (nearest-neighbor) for mono images.

    Maps each source pixel (x, y) through the affine transform to canvas
    coordinates (u, v) and deposits ``val * weight`` at the nearest integer
    canvas pixel, accumulating ``weight`` into the coverage buffer.

    Parameters
    ----------
    image_data : (H, W) source frame
    affine_2x3 : (2, 3) affine matrix mapping source -> canvas
        u = a*x + b*y + tx ; v = c*x + d*y + ty
    drizzle_buffer : (Ho, Wo) signal accumulator (modified in place)
    coverage_buffer : (Ho, Wo) weight accumulator (modified in place)
    scale : kept for interface compatibility (unused here)
    weight : per-frame weight applied to every deposited pixel

    Returns
    -------
    (drizzle_buffer, coverage_buffer) — the same arrays, for convenience.

    Serial on purpose: several source pixels can land on the same canvas
    pixel, so a prange over rows would race on the ``+=`` scatter writes
    (numba has no atomics for this pattern).  This matches the other
    serial deposit kernels in this module.

    NOTE(review): another function with this exact name is defined later
    in this module and will shadow this one at import time — confirm
    which definition is intended to be public.
    """
    H, W = image_data.shape
    Ho, Wo = drizzle_buffer.shape

    for y in range(H):
        for x in range(W):
            val = image_data[y, x]
            if val == 0:
                # Zero pixels contribute neither signal nor coverage.
                continue

            # Project the pixel center through the affine transform.
            u = affine_2x3[0, 0] * x + affine_2x3[0, 1] * y + affine_2x3[0, 2]
            v = affine_2x3[1, 0] * x + affine_2x3[1, 1] * y + affine_2x3[1, 2]

            # Nearest-neighbor deposit.
            ui = int(round(u))
            vi = int(round(v))

            if 0 <= ui < Wo and 0 <= vi < Ho:
                drizzle_buffer[vi, ui] += val * weight
                coverage_buffer[vi, ui] += weight

    return drizzle_buffer, coverage_buffer
2846
+
2847
+
2848
@njit(fastmath=True, cache=True)
def drizzle_deposit_color_naive(image_data, affine_2x3, drizzle_buffer, coverage_buffer, scale, drop_shrink, weight):
    """
    Naive drizzle deposit (nearest-neighbor) for color images.

    Parameters
    ----------
    image_data : (H, W, C) source frame
    affine_2x3 : (2, 3) affine matrix mapping source -> canvas
    drizzle_buffer : (Ho, Wo, C) signal accumulator (modified in place)
    coverage_buffer : (Ho, Wo, C) weight accumulator (modified in place)
    scale, drop_shrink : kept for interface compatibility (unused here)
    weight : per-frame weight applied to every deposited sample

    Returns
    -------
    (drizzle_buffer, coverage_buffer) — the same arrays, for convenience.

    Serial on purpose: several source pixels can project onto the same
    canvas pixel, so parallelizing the outer loop with prange would race
    on the ``+=`` scatter writes.  This matches the module's other serial
    deposit kernels.

    NOTE(review): another function with this exact name is defined later
    in this module and will shadow this one at import time — confirm
    which definition is intended to be public.
    """
    H, W, C = image_data.shape
    Ho, Wo, _ = drizzle_buffer.shape

    for y in range(H):
        for x in range(W):
            # Project the pixel center; all channels share one location.
            u = affine_2x3[0, 0] * x + affine_2x3[0, 1] * y + affine_2x3[0, 2]
            v = affine_2x3[1, 0] * x + affine_2x3[1, 1] * y + affine_2x3[1, 2]

            ui = int(round(u))
            vi = int(round(v))

            if 0 <= ui < Wo and 0 <= vi < Ho:
                for c in range(C):
                    val = image_data[y, x, c]
                    drizzle_buffer[vi, ui, c] += val * weight
                    coverage_buffer[vi, ui, c] += weight

    return drizzle_buffer, coverage_buffer
+ return drizzle_buffer, coverage_buffer
2770
2878
  @njit(parallel=True, fastmath=True)
2771
2879
  def numba_mono_from_img(img, bp, denom, median_rescaled, target_median):
2772
2880
  H, W = img.shape
@@ -2960,7 +3068,7 @@ def generate_sample_points(image: np.ndarray, num_points: int = 100) -> np.ndarr
2960
3068
 
2961
3069
  return np.array(points, dtype=np.int32) # Return all collected points
2962
3070
 
2963
- @njit(parallel=True, fastmath=True)
3071
+ @njit(parallel=True, fastmath=True, cache=True)
2964
3072
  def numba_unstretch(image: np.ndarray, stretch_original_medians: np.ndarray, stretch_original_mins: np.ndarray) -> np.ndarray:
2965
3073
  """
2966
3074
  Numba-optimized function to undo the unlinked stretch.
@@ -2990,7 +3098,7 @@ def numba_unstretch(image: np.ndarray, stretch_original_medians: np.ndarray, str
2990
3098
  return np.clip(out, 0, 1) # Clip to valid range
2991
3099
 
2992
3100
 
2993
- @njit(fastmath=True)
3101
+ @njit(fastmath=True, cache=True)
2994
3102
  def drizzle_deposit_numba_naive(
2995
3103
  img_data, # shape (H, W), mono
2996
3104
  transform, # shape (2, 3), e.g. [[a,b,tx],[c,d,ty]]
@@ -3047,7 +3155,7 @@ def drizzle_deposit_numba_naive(
3047
3155
  return drizzle_buffer, coverage_buffer
3048
3156
 
3049
3157
 
3050
- @njit(fastmath=True)
3158
+ @njit(fastmath=True, cache=True)
3051
3159
  def drizzle_deposit_numba_footprint(
3052
3160
  img_data, # shape (H, W), mono
3053
3161
  transform, # shape (2, 3)
@@ -3384,7 +3492,7 @@ def finalize_drizzle_2d(drizzle_buffer, coverage_buffer, final_out):
3384
3492
  final_out[y, x] = drizzle_buffer[y, x] / cov
3385
3493
  return final_out
3386
3494
 
3387
- @njit(fastmath=True)
3495
+ @njit(fastmath=True, cache=True)
3388
3496
  def drizzle_deposit_color_naive(
3389
3497
  img_data, # shape (H,W,C)
3390
3498
  transform, # shape (2,3)
@@ -3437,7 +3545,7 @@ def drizzle_deposit_color_naive(
3437
3545
  coverage_buffer[Yo, Xo, cidx] += frame_weight
3438
3546
 
3439
3547
  return drizzle_buffer, coverage_buffer
3440
- @njit(fastmath=True)
3548
+ @njit(fastmath=True, cache=True)
3441
3549
  def drizzle_deposit_color_footprint(
3442
3550
  img_data, # shape (H,W,C)
3443
3551
  transform, # shape (2,3)
@@ -3521,7 +3629,7 @@ def drizzle_deposit_color_footprint(
3521
3629
  return drizzle_buffer, coverage_buffer
3522
3630
 
3523
3631
 
3524
- @njit
3632
+ @njit(cache=True)
3525
3633
  def finalize_drizzle_3d(drizzle_buffer, coverage_buffer, final_out):
3526
3634
  """
3527
3635
  final_out[y,x,c] = drizzle_buffer[y,x,c] / coverage_buffer[y,x,c]
@@ -3540,7 +3648,7 @@ def finalize_drizzle_3d(drizzle_buffer, coverage_buffer, final_out):
3540
3648
 
3541
3649
 
3542
3650
 
3543
- @njit
3651
+ @njit(cache=True)
3544
3652
  def piecewise_linear(val, xvals, yvals):
3545
3653
  """
3546
3654
  Performs piecewise linear interpolation:
@@ -3561,7 +3669,7 @@ def piecewise_linear(val, xvals, yvals):
3561
3669
  return yvals[i] + ratio * dy
3562
3670
  return yvals[-1]
3563
3671
 
3564
- @njit(parallel=True, fastmath=True)
3672
+ @njit(parallel=True, fastmath=True, cache=True)
3565
3673
  def apply_curves_numba(image, xvals, yvals):
3566
3674
  """
3567
3675
  Numba-accelerated routine to apply piecewise linear interpolation
@@ -3694,3 +3802,77 @@ def _drizzle_kernel_weights(kernel_code: int, Xo: float, Yo: float,
3694
3802
  return sum_w, cnt
3695
3803
 
3696
3804
 
3805
+
3806
@njit(fastmath=True, cache=True)
def gradient_descent_to_dim_spot_numba(gray_small, start_x, start_y, patch_size):
    """
    Numba port of _gradient_descent_to_dim_spot.

    Starting at (start_x, start_y), repeatedly steps to whichever 3x3
    neighbor has the lowest median over a patch_size x patch_size patch,
    stopping when no neighbor improves on the current spot (or after at
    most 60 steps).

    gray_small: 2D float32 array.
    Returns the (x, y) of the dim spot reached.
    """
    rows, cols = gray_small.shape
    half = patch_size // 2

    # Clamp the starting point into the image.
    px = int(min(max(start_x, 0), cols - 1))
    py = int(min(max(start_y, 0), rows - 1))

    for _step in range(60):
        # Median of the patch centered on the current location.
        # np.median on a slice copies, but patches are small (e.g. 15x15),
        # so this is cheap relative to interpreter overhead.
        cx0 = max(0, px - half)
        cx1 = min(cols, px + half + 1)
        cy0 = max(0, py - half)
        cy1 = min(rows, py + half + 1)
        patch = gray_small[cy0:cy1, cx0:cx1].flatten()
        if patch.size == 0:
            here = 1e9  # degenerate patch; should not occur
        else:
            here = np.median(patch)

        low_val = here
        low_x = px
        low_y = py
        moved = False

        # Scan the strict 3x3 neighborhood for a dimmer patch median.
        for dy in range(-1, 2):
            for dx in range(-1, 2):
                if dx == 0 and dy == 0:
                    continue

                qx = px + dx
                qy = py + dy
                if qx < 0 or qy < 0 or qx >= cols or qy >= rows:
                    continue

                sx0 = max(0, qx - half)
                sx1 = min(cols, qx + half + 1)
                sy0 = max(0, qy - half)
                sy1 = min(rows, qy + half + 1)
                neigh = gray_small[sy0:sy1, sx0:sx1].flatten()
                if neigh.size == 0:
                    cand = 1e9
                else:
                    cand = np.median(neigh)

                # Strict '<' keeps the current spot on ties.
                if cand < low_val:
                    low_val = cand
                    low_x = qx
                    low_y = qy
                    moved = True

        if not moved:
            break

        px = low_x
        py = low_y

    return px, py