setiastrosuitepro 1.7.3__py3-none-any.whl → 1.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of setiastrosuitepro might be problematic. See the registry's advisory page for this release for more details.

Files changed (47)
  1. setiastro/saspro/__init__.py +15 -4
  2. setiastro/saspro/__main__.py +23 -5
  3. setiastro/saspro/_generated/build_info.py +2 -2
  4. setiastro/saspro/abe.py +4 -4
  5. setiastro/saspro/autostretch.py +29 -18
  6. setiastro/saspro/gui/main_window.py +5 -5
  7. setiastro/saspro/gui/mixins/toolbar_mixin.py +2 -2
  8. setiastro/saspro/legacy/numba_utils.py +301 -119
  9. setiastro/saspro/numba_utils.py +998 -270
  10. setiastro/saspro/ops/settings.py +6 -6
  11. setiastro/saspro/pixelmath.py +1 -1
  12. setiastro/saspro/planetprojection.py +310 -105
  13. setiastro/saspro/sfcc.py +14 -8
  14. setiastro/saspro/stacking_suite.py +292 -111
  15. setiastro/saspro/subwindow.py +28 -35
  16. setiastro/saspro/translations/all_source_strings.json +2 -2
  17. setiastro/saspro/translations/ar_translations.py +3 -3
  18. setiastro/saspro/translations/de_translations.py +2 -2
  19. setiastro/saspro/translations/es_translations.py +2 -2
  20. setiastro/saspro/translations/fr_translations.py +2 -2
  21. setiastro/saspro/translations/hi_translations.py +2 -2
  22. setiastro/saspro/translations/it_translations.py +2 -2
  23. setiastro/saspro/translations/ja_translations.py +2 -2
  24. setiastro/saspro/translations/pt_translations.py +2 -2
  25. setiastro/saspro/translations/ru_translations.py +2 -2
  26. setiastro/saspro/translations/saspro_ar.ts +2 -2
  27. setiastro/saspro/translations/saspro_de.ts +4 -4
  28. setiastro/saspro/translations/saspro_es.ts +2 -2
  29. setiastro/saspro/translations/saspro_fr.ts +2 -2
  30. setiastro/saspro/translations/saspro_hi.ts +2 -2
  31. setiastro/saspro/translations/saspro_it.ts +4 -4
  32. setiastro/saspro/translations/saspro_ja.ts +2 -2
  33. setiastro/saspro/translations/saspro_pt.ts +2 -2
  34. setiastro/saspro/translations/saspro_ru.ts +2 -2
  35. setiastro/saspro/translations/saspro_sw.ts +2 -2
  36. setiastro/saspro/translations/saspro_uk.ts +2 -2
  37. setiastro/saspro/translations/saspro_zh.ts +2 -2
  38. setiastro/saspro/translations/sw_translations.py +2 -2
  39. setiastro/saspro/translations/uk_translations.py +2 -2
  40. setiastro/saspro/translations/zh_translations.py +2 -2
  41. setiastro/saspro/window_shelf.py +62 -1
  42. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.4.dist-info}/METADATA +1 -1
  43. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.4.dist-info}/RECORD +47 -47
  44. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.4.dist-info}/entry_points.txt +1 -1
  45. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.4.dist-info}/WHEEL +0 -0
  46. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.4.dist-info}/licenses/LICENSE +0 -0
  47. {setiastrosuitepro-1.7.3.dist-info → setiastrosuitepro-1.7.4.dist-info}/licenses/license.txt +0 -0
@@ -1,5 +1,6 @@
1
1
  import numpy as np
2
2
  from numba import njit, prange
3
+ from numba.typed import List
3
4
  import cv2
4
5
  import math
5
6
 
@@ -317,7 +318,255 @@ def invert_image_numba(image):
317
318
  output[y, x, c] = 1.0 - image[y, x, c]
318
319
  return output
319
320
 
320
- @njit(parallel=True, cache=True)
321
+ @njit(parallel=True, fastmath=True)
322
+ def rotate_180_numba(image):
323
+ """
324
+ Rotates the image 180 degrees.
325
+ Works with both mono (2D) and color (3D) images.
326
+ """
327
+ if image.ndim == 2:
328
+ height, width = image.shape
329
+ output = np.empty((height, width), dtype=image.dtype)
330
+ for y in prange(height):
331
+ for x in prange(width):
332
+ output[y, x] = image[height - 1 - y, width - 1 - x]
333
+ return output
334
+ else:
335
+ height, width, channels = image.shape
336
+ output = np.empty((height, width, channels), dtype=image.dtype)
337
+ for y in prange(height):
338
+ for x in prange(width):
339
+ for c in range(channels):
340
+ output[y, x, c] = image[height - 1 - y, width - 1 - x, c]
341
+ return output
342
+
343
+ def normalize_flat_cfa_inplace(flat2d: np.ndarray, pattern: str, *, combine_greens: bool = True) -> np.ndarray:
344
+ """
345
+ Normalize a Bayer/mosaic flat so each CFA plane has median 1.0.
346
+ Operates in-place on flat2d and returns it.
347
+
348
+ pattern: 'RGGB','BGGR','GRBG','GBRG'
349
+ combine_greens: if True, use one median for both greens (reduces checkerboard risk)
350
+ """
351
+ pat = (pattern or "RGGB").strip().upper()
352
+ if pat not in ("RGGB", "BGGR", "GRBG", "GBRG"):
353
+ pat = "RGGB"
354
+
355
+ # map (row_parity, col_parity) -> plane key
356
+ # row0: even rows, row1: odd rows; col0: even cols, col1: odd cols
357
+ if pat == "RGGB":
358
+ m = {(0,0):"R", (0,1):"G1", (1,0):"G2", (1,1):"B"}
359
+ elif pat == "BGGR":
360
+ m = {(0,0):"B", (0,1):"G1", (1,0):"G2", (1,1):"R"}
361
+ elif pat == "GRBG":
362
+ m = {(0,0):"G1", (0,1):"R", (1,0):"B", (1,1):"G2"}
363
+ else: # "GBRG"
364
+ m = {(0,0):"G1", (0,1):"B", (1,0):"R", (1,1):"G2"}
365
+
366
+ # build slice views
367
+ planes = {
368
+ m[(0,0)]: flat2d[0::2, 0::2],
369
+ m[(0,1)]: flat2d[0::2, 1::2],
370
+ m[(1,0)]: flat2d[1::2, 0::2],
371
+ m[(1,1)]: flat2d[1::2, 1::2],
372
+ }
373
+
374
+ def safe_median(a: np.ndarray) -> float:
375
+ v = a[np.isfinite(a) & (a > 0)]
376
+ if v.size == 0:
377
+ return 1.0
378
+ d = float(np.median(v))
379
+ return d if np.isfinite(d) and d > 0 else 1.0
380
+
381
+ # greens
382
+ if combine_greens and ("G1" in planes) and ("G2" in planes):
383
+ g = np.concatenate([
384
+ planes["G1"][np.isfinite(planes["G1"]) & (planes["G1"] > 0)].ravel(),
385
+ planes["G2"][np.isfinite(planes["G2"]) & (planes["G2"] > 0)].ravel(),
386
+ ])
387
+ denom_g = float(np.median(g)) if g.size else 1.0
388
+ if not np.isfinite(denom_g) or denom_g <= 0:
389
+ denom_g = 1.0
390
+ planes["G1"][:] = planes["G1"] / denom_g
391
+ planes["G2"][:] = planes["G2"] / denom_g
392
+ else:
393
+ for k in ("G1","G2"):
394
+ if k in planes:
395
+ d = safe_median(planes[k])
396
+ planes[k][:] = planes[k] / d
397
+
398
+ # R / B
399
+ for k in ("R","B"):
400
+ if k in planes:
401
+ d = safe_median(planes[k])
402
+ planes[k][:] = planes[k] / d
403
+
404
+ # final safety
405
+ np.nan_to_num(flat2d, copy=False, nan=1.0, posinf=1.0, neginf=1.0)
406
+ flat2d[flat2d == 0] = 1.0
407
+ return flat2d
408
+
409
+
410
+
411
+ @njit(parallel=True, fastmath=True)
412
+ def _flat_div_2d(img, flat):
413
+ h, w = img.shape
414
+ for y in prange(h):
415
+ for x in range(w):
416
+ f = flat[y, x]
417
+ if (not np.isfinite(f)) or f <= 0.0:
418
+ f = 1.0
419
+ img[y, x] = img[y, x] / f
420
+ return img
421
+
422
+ @njit(parallel=True, fastmath=True)
423
+ def _flat_div_hwc(img, flat):
424
+ h, w, c = img.shape
425
+ flat_is_2d = (flat.ndim == 2)
426
+ for y in prange(h):
427
+ for x in range(w):
428
+ if flat_is_2d:
429
+ f0 = flat[y, x]
430
+ if (not np.isfinite(f0)) or f0 <= 0.0:
431
+ f0 = 1.0
432
+ for k in range(c):
433
+ img[y, x, k] = img[y, x, k] / f0
434
+ else:
435
+ for k in range(c):
436
+ f = flat[y, x, k]
437
+ if (not np.isfinite(f)) or f <= 0.0:
438
+ f = 1.0
439
+ img[y, x, k] = img[y, x, k] / f
440
+ return img
441
+
442
+ @njit(parallel=True, fastmath=True)
443
+ def _flat_div_chw(img, flat):
444
+ c, h, w = img.shape
445
+ flat_is_2d = (flat.ndim == 2)
446
+ for y in prange(h):
447
+ for x in range(w):
448
+ if flat_is_2d:
449
+ f0 = flat[y, x]
450
+ if (not np.isfinite(f0)) or f0 <= 0.0:
451
+ f0 = 1.0
452
+ for k in range(c):
453
+ img[k, y, x] = img[k, y, x] / f0
454
+ else:
455
+ for k in range(c):
456
+ f = flat[k, y, x]
457
+ if (not np.isfinite(f)) or f <= 0.0:
458
+ f = 1.0
459
+ img[k, y, x] = img[k, y, x] / f
460
+ return img
461
+
462
+ def apply_flat_division_numba(image, master_flat, master_bias=None):
463
+ """
464
+ Supports:
465
+ - 2D mono/bayer: (H,W)
466
+ - Color HWC: (H,W,3)
467
+ - Color CHW: (3,H,W)
468
+
469
+ NOTE: master_bias arg kept for API compatibility; do bias/dark subtraction outside.
470
+ """
471
+ if image.ndim == 2:
472
+ return _flat_div_2d(image, master_flat)
473
+
474
+ if image.ndim == 3:
475
+ # CHW common in your pipeline
476
+ if image.shape[0] == 3 and image.shape[-1] != 3:
477
+ return _flat_div_chw(image, master_flat)
478
+ # HWC
479
+ if image.shape[-1] == 3:
480
+ return _flat_div_hwc(image, master_flat)
481
+
482
+ # fallback: treat as HWC
483
+ return _flat_div_hwc(image, master_flat)
484
+
485
+ raise ValueError(f"apply_flat_division_numba: expected 2D or 3D, got shape {image.shape}")
486
+
487
+ def _bayerpat_to_id(pat: str) -> int:
488
+ pat = (pat or "RGGB").strip().upper()
489
+ if pat == "RGGB": return 0
490
+ if pat == "BGGR": return 1
491
+ if pat == "GRBG": return 2
492
+ if pat == "GBRG": return 3
493
+ return 0
494
+
495
+ def _bayer_plane_medians(flat2d: np.ndarray, pat: str) -> np.ndarray:
496
+ pat = (pat or "RGGB").strip().upper()
497
+ if pat == "RGGB":
498
+ r = np.median(flat2d[0::2, 0::2])
499
+ g1 = np.median(flat2d[0::2, 1::2])
500
+ g2 = np.median(flat2d[1::2, 0::2])
501
+ b = np.median(flat2d[1::2, 1::2])
502
+ elif pat == "BGGR":
503
+ b = np.median(flat2d[0::2, 0::2])
504
+ g1 = np.median(flat2d[0::2, 1::2])
505
+ g2 = np.median(flat2d[1::2, 0::2])
506
+ r = np.median(flat2d[1::2, 1::2])
507
+ elif pat == "GRBG":
508
+ g1 = np.median(flat2d[0::2, 0::2])
509
+ r = np.median(flat2d[0::2, 1::2])
510
+ b = np.median(flat2d[1::2, 0::2])
511
+ g2 = np.median(flat2d[1::2, 1::2])
512
+ else: # GBRG
513
+ g1 = np.median(flat2d[0::2, 0::2])
514
+ b = np.median(flat2d[0::2, 1::2])
515
+ r = np.median(flat2d[1::2, 0::2])
516
+ g2 = np.median(flat2d[1::2, 1::2])
517
+
518
+ med4 = np.array([r, g1, g2, b], dtype=np.float32)
519
+ med4[~np.isfinite(med4)] = 1.0
520
+ med4[med4 <= 0] = 1.0
521
+ return med4
522
+
523
+ @njit(parallel=True, fastmath=True)
524
+ def apply_flat_division_numba_bayer_2d(image, master_flat, med4, pat_id):
525
+ """
526
+ Bayer-aware mono division. image/master_flat are (H,W).
527
+ med4 is [R,G1,G2,B] for that master_flat, pat_id in {0..3}.
528
+ """
529
+ # parity index = (row&1)*2 + (col&1)
530
+ # med4 index order: 0=R, 1=G1, 2=G2, 3=B
531
+
532
+ # tables map parity_index -> med4 index
533
+ # parity_index: 0:(0,0) 1:(0,1) 2:(1,0) 3:(1,1)
534
+ if pat_id == 0: # RGGB: (0,0)R (0,1)G1 (1,0)G2 (1,1)B
535
+ t0, t1, t2, t3 = 0, 1, 2, 3
536
+ elif pat_id == 1: # BGGR: (0,0)B (0,1)G1 (1,0)G2 (1,1)R
537
+ t0, t1, t2, t3 = 3, 1, 2, 0
538
+ elif pat_id == 2: # GRBG: (0,0)G1 (0,1)R (1,0)B (1,1)G2
539
+ t0, t1, t2, t3 = 1, 0, 3, 2
540
+ else: # GBRG: (0,0)G1 (0,1)B (1,0)R (1,1)G2
541
+ t0, t1, t2, t3 = 1, 3, 0, 2
542
+
543
+ H, W = image.shape
544
+ for y in prange(H):
545
+ y1 = y & 1
546
+ for x in range(W):
547
+ x1 = x & 1
548
+ p = (y1 << 1) | x1 # 0..3
549
+ if p == 0:
550
+ pi = t0
551
+ elif p == 1:
552
+ pi = t1
553
+ elif p == 2:
554
+ pi = t2
555
+ else:
556
+ pi = t3
557
+
558
+ denom = master_flat[y, x] / med4[pi]
559
+ if denom == 0.0 or (not np.isfinite(denom)):
560
+ denom = 1.0
561
+ image[y, x] /= denom
562
+ return image
563
+
564
+ def apply_flat_division_bayer(image2d: np.ndarray, flat2d: np.ndarray, bayerpat: str):
565
+ med4 = _bayer_plane_medians(flat2d, bayerpat)
566
+ pid = _bayerpat_to_id(bayerpat)
567
+ return apply_flat_division_numba_bayer_2d(image2d, flat2d, med4, pid)
568
+
569
+ @njit(parallel=True)
321
570
  def subtract_dark_3d(frames, dark_frame):
322
571
  """
323
572
  For mono stack:
@@ -1395,34 +1644,31 @@ def subtract_dark_with_pedestal(frames, dark_frame, pedestal):
1395
1644
 
1396
1645
 
1397
1646
  @njit(parallel=True, fastmath=True, cache=True)
1398
- def parallel_measure_frames(images):
1399
- """
1400
- Parallel processing for measuring simple stats (mean only).
1401
- 'images' is a list (or array) of N images, each of which can be:
1402
- - 2D (H,W) for a single mono image
1403
- - 3D (H,W,C) for a single color image
1404
- - Possibly 3D or 4D if you're storing multi-frame stacks in 'images'
1405
- We just compute np.mean(...) of each image, no matter how many dims.
1406
- """
1407
- n = len(images)
1408
- means = np.zeros(n, dtype=np.float32)
1409
-
1647
+ def _parallel_measure_frames_stack(stack): # stack: float32[N,H,W] or float32[N,H,W,C]
1648
+ n = stack.shape[0]
1649
+ means = np.empty(n, np.float32)
1410
1650
  for i in prange(n):
1411
- arr = images[i]
1412
- # arr could have shape (H,W) or (H,W,C) or (F,H,W) etc.
1413
- # np.mean works for any dimension, so no special logic needed.
1414
- means[i] = np.float32(np.mean(arr))
1651
+ # Option A: mean then cast
1652
+ # m = np.mean(stack[i])
1653
+ # means[i] = np.float32(m)
1415
1654
 
1655
+ # Option B (often a hair faster): sum / size then cast
1656
+ s = np.sum(stack[i]) # no kwargs
1657
+ means[i] = np.float32(s / stack[i].size)
1416
1658
  return means
1417
1659
 
1418
-
1660
+ def parallel_measure_frames(images_py):
1661
+ a = [np.ascontiguousarray(x, dtype=np.float32) for x in images_py]
1662
+ a = [x[:, :, None] if x.ndim == 2 else x for x in a]
1663
+ stack = np.ascontiguousarray(np.stack(a, axis=0)) # (N,H,W,C)
1664
+ return _parallel_measure_frames_stack(stack)
1419
1665
  @njit(fastmath=True, cache=True)
1420
1666
  def fast_mad(image):
1421
1667
  """ Computes the Median Absolute Deviation (MAD) as a robust noise estimator. """
1422
- flat_image = image.ravel() # ✅ Flatten the 2D array into 1D
1668
+ flat_image = image.ravel() # Flatten the 2D array into 1D
1423
1669
  median_val = np.median(flat_image) # Compute median
1424
1670
  mad = np.median(np.abs(flat_image - median_val)) # Compute MAD
1425
- return mad * 1.4826 # ✅ Scale MAD to match standard deviation (for Gaussian noise)
1671
+ return mad * 1.4826 # Scale MAD to match standard deviation (for Gaussian noise)
1426
1672
 
1427
1673
 
1428
1674
 
@@ -1441,95 +1687,235 @@ def compute_noise(image):
1441
1687
  """ Estimates noise using Median Absolute Deviation (MAD). """
1442
1688
  return fast_mad(image)
1443
1689
 
1690
+ def _downsample_for_stars(img: np.ndarray, factor: int = 4) -> np.ndarray:
1691
+ """
1692
+ Very cheap spatial downsample for star counting.
1693
+ Works on mono or RGB. Returns float32 2D.
1694
+ """
1695
+ if img.ndim == 3 and img.shape[-1] == 3:
1696
+ # luma first
1697
+ r, g, b = img[..., 0], img[..., 1], img[..., 2]
1698
+ img = 0.2126*r + 0.7152*g + 0.0722*b
1699
+ img = np.asarray(img, dtype=np.float32, order="C")
1700
+ if factor <= 1:
1701
+ return img
1702
+ # stride (fast & cache friendly), not interpolation
1703
+ return img[::factor, ::factor]
1444
1704
 
1445
1705
 
1706
+ def fast_star_count_lite(img: np.ndarray,
1707
+ sample_stride: int = 8,
1708
+ localmax_k: int = 3,
1709
+ thr_sigma: float = 4.0,
1710
+ max_ecc_samples: int = 200) -> tuple[int, float]:
1711
+ """
1712
+ Super-fast star counter:
1713
+ • sample a tiny subset to estimate background mean/std
1714
+ • local-maxima on small image
1715
+ • optional rough eccentricity on a small random subset
1716
+ Returns (count, avg_ecc).
1717
+ """
1718
+ # img is 2D float32, already downsampled
1719
+ H, W = img.shape
1720
+ # 1) quick background stats on a sparse grid
1721
+ samp = img[::sample_stride, ::sample_stride]
1722
+ mu = float(np.mean(samp))
1723
+ sigma = float(np.std(samp))
1724
+ thr = mu + thr_sigma * max(sigma, 1e-6)
1725
+
1726
+ # 2) find local maxima above threshold
1727
+ # small structuring element; k must be odd
1728
+ k = localmax_k if (localmax_k % 2 == 1) else (localmax_k + 1)
1729
+ se = np.ones((k, k), np.uint8)
1730
+ # dilate the image (on float -> do it via cv2.dilate after scaling)
1731
+ # scale to 16-bit to keep numeric fidelity (cheap)
1732
+ scaled = (img * (65535.0 / max(np.max(img), 1e-6))).astype(np.uint16)
1733
+ dil = cv2.dilate(scaled, se)
1734
+ # peaks are pixels that equal the local max and exceed thr
1735
+ peaks = (scaled == dil) & (img > thr)
1736
+ count = int(np.count_nonzero(peaks))
1737
+
1738
+ # 3) (optional) rough eccentricity on a tiny subset
1739
+ if count == 0:
1740
+ return 0, 0.0
1741
+ if max_ecc_samples <= 0:
1742
+ return count, 0.0
1743
+
1744
+ ys, xs = np.where(peaks)
1745
+ if xs.size > max_ecc_samples:
1746
+ idx = np.random.choice(xs.size, max_ecc_samples, replace=False)
1747
+ xs, ys = xs[idx], ys[idx]
1748
+
1749
+ ecc_vals = []
1750
+ # small window around each peak
1751
+ r = 2 # 5×5 window
1752
+ for x, y in zip(xs, ys):
1753
+ x0, x1 = max(0, x - r), min(W, x + r + 1)
1754
+ y0, y1 = max(0, y - r), min(H, y + r + 1)
1755
+ patch = img[y0:y1, x0:x1]
1756
+ if patch.size < 9:
1757
+ continue
1758
+ # second moments for ellipse approximation
1759
+ yy, xx = np.mgrid[y0:y1, x0:x1]
1760
+ yy = yy.astype(np.float32) - y
1761
+ xx = xx.astype(np.float32) - x
1762
+ w = patch - patch.min()
1763
+ s = float(w.sum())
1764
+ if s <= 0:
1765
+ continue
1766
+ mxx = float((w * (xx*xx)).sum() / s)
1767
+ myy = float((w * (yy*yy)).sum() / s)
1768
+ # approximate major/minor from variances
1769
+ a = math.sqrt(max(mxx, myy))
1770
+ b = math.sqrt(min(mxx, myy))
1771
+ if a > 1e-6:
1772
+ e = math.sqrt(max(0.0, 1.0 - (b*b)/(a*a)))
1773
+ ecc_vals.append(e)
1774
+ avg_ecc = float(np.mean(ecc_vals)) if ecc_vals else 0.0
1775
+ return count, avg_ecc
1776
+
1777
+
1778
+
1779
+ def compute_star_count_fast_preview(preview_2d: np.ndarray) -> tuple[int, float]:
1780
+ """
1781
+ Wrapper used in measurement: downsample aggressively and run the lite counter.
1782
+ """
1783
+ tiny = _downsample_for_stars(preview_2d, factor=4) # try 4–8 depending on your sensor
1784
+ return fast_star_count_lite(tiny, sample_stride=8, localmax_k=3, thr_sigma=4.0, max_ecc_samples=120)
1446
1785
 
1447
1786
  def compute_star_count(image):
1448
- """ Uses fast star detection instead of DAOStarFinder. """
1449
1787
  return fast_star_count(image)
1450
1788
 
1451
-
1452
1789
  def fast_star_count(
1453
- image,
1454
- blur_size=15, # Smaller blur preserves faint/small stars
1455
- threshold_factor=0.8,
1456
- min_area=2,
1457
- max_area=5000
1790
+ image,
1791
+ blur_size=None,
1792
+ threshold_factor=0.8,
1793
+ min_area=None,
1794
+ max_area=None,
1795
+ *,
1796
+ # robust stretch controls (file1)
1797
+ stretch=True,
1798
+ gamma=0.45,
1799
+ p_lo=0.1,
1800
+ p_hi=99.8,
1801
+ # morphology behavior
1802
+ morph_open="auto", # "auto" | True | False
1458
1803
  ):
1459
1804
  """
1460
- Estimate star count + average eccentricity by:
1461
- 1) Convert to 8-bit grayscale
1462
- 2) Blur => subtract => enhance stars
1463
- 3) Otsu's threshold * threshold_factor => final threshold
1464
- 4) Contour detection + ellipse fit => eccentricity
1465
1805
  Returns (star_count, avg_ecc).
1806
+
1807
+ stretch=True (default): robust for linear astro images (percentile stretch + gamma)
1808
+ stretch=False: simple min/max normalize (legacy behavior)
1466
1809
  """
1467
1810
 
1468
- # 1) Convert to grayscale if needed
1811
+ # 1) grayscale float32
1469
1812
  if image.ndim == 3:
1470
- image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
1813
+ r, g, b = image[..., 0], image[..., 1], image[..., 2]
1814
+ img = (0.2126 * r + 0.7152 * g + 0.0722 * b).astype(np.float32, copy=False)
1815
+ else:
1816
+ img = np.asarray(image, dtype=np.float32, order="C")
1471
1817
 
1472
- # 2) Normalize to 8-bit
1473
- img_min, img_max = image.min(), image.max()
1474
- if img_max > img_min:
1475
- image_8u = (255.0 * (image - img_min) / (img_max - img_min)).astype(np.uint8)
1818
+ H, W = img.shape[:2]
1819
+ short_side = max(1, min(H, W))
1820
+
1821
+ # 2) adaptive params (file1 style)
1822
+ if blur_size is None:
1823
+ k = max(3, int(round(short_side / 80)))
1824
+ blur_size = k if (k % 2 == 1) else (k + 1)
1825
+
1826
+ if min_area is None:
1827
+ min_area = 1
1828
+
1829
+ if max_area is None:
1830
+ max_area = max(100, int(0.01 * H * W))
1831
+
1832
+ # 3) build 8-bit working image
1833
+ if stretch:
1834
+ lo = float(np.percentile(img, p_lo))
1835
+ hi = float(np.percentile(img, p_hi))
1836
+ if not (hi > lo):
1837
+ lo, hi = float(img.min()), float(img.max())
1838
+ if not (hi > lo):
1839
+ return 0, 0.0
1840
+
1841
+ norm = (img - lo) / max(1e-8, (hi - lo))
1842
+ norm = np.clip(norm, 0.0, 1.0)
1843
+
1844
+ if gamma and gamma > 0:
1845
+ norm = np.power(norm, gamma, dtype=np.float32)
1846
+
1847
+ image_8u = (norm * 255.0).astype(np.uint8)
1476
1848
  else:
1477
- return 0, 0.0 # All pixels identical => no stars
1849
+ img_min = float(img.min())
1850
+ img_max = float(img.max())
1851
+ if img_max > img_min:
1852
+ image_8u = (255.0 * (img - img_min) / (img_max - img_min)).astype(np.uint8)
1853
+ else:
1854
+ return 0, 0.0
1478
1855
 
1479
- # 3) Blur + subtract => enhance
1856
+ # 4) blur + subtract
1480
1857
  blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
1481
- subtracted = cv2.absdiff(image_8u, blurred)
1858
+ sub = cv2.absdiff(image_8u, blurred)
1482
1859
 
1483
- # 4) Otsu's threshold on 'subtracted'
1484
- otsu_thresh_val, _ = cv2.threshold(subtracted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
1485
- # Scale it down if we want to detect more/fainter stars
1486
- final_thresh_val = int(otsu_thresh_val * threshold_factor)
1487
- if final_thresh_val < 2:
1488
- final_thresh_val = 2 # avoid going below 2
1860
+ # 5) Otsu + threshold_factor
1861
+ otsu, _ = cv2.threshold(sub, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
1862
+ thr = max(2, int(otsu * threshold_factor))
1863
+ _, mask = cv2.threshold(sub, thr, 255, cv2.THRESH_BINARY)
1489
1864
 
1490
- # 5) Apply threshold
1491
- _, thresh = cv2.threshold(subtracted, final_thresh_val, 255, cv2.THRESH_BINARY)
1865
+ # 6) morphology
1866
+ do_morph = False
1867
+ if morph_open == "auto":
1868
+ do_morph = short_side >= 600
1869
+ elif morph_open is True:
1870
+ do_morph = True
1492
1871
 
1493
- # 6) (Optional) Morphological opening to remove single-pixel noise
1494
- # Adjust kernel size if you get too many/few stars
1495
- kernel = np.ones((2, 2), np.uint8)
1496
- thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
1872
+ if do_morph:
1873
+ mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((2, 2), np.uint8))
1497
1874
 
1498
- # 7) Find contours
1499
- contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1875
+ # 7) contours -> ellipse ecc
1876
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1500
1877
 
1501
- # 8) Filter contours by area, fit ellipse => compute eccentricity
1502
1878
  star_count = 0
1503
1879
  ecc_values = []
1504
1880
  for c in contours:
1505
1881
  area = cv2.contourArea(c)
1506
1882
  if area < min_area or area > max_area:
1507
1883
  continue
1508
-
1509
1884
  if len(c) < 5:
1510
- continue # Need >=5 points to fit an ellipse
1511
-
1512
- # Fit ellipse
1513
- ellipse = cv2.fitEllipse(c)
1514
- (cx, cy), (major_axis, minor_axis), angle = ellipse
1515
-
1516
- # major_axis >= minor_axis
1517
- if minor_axis > major_axis:
1518
- major_axis, minor_axis = minor_axis, major_axis
1519
-
1520
- if major_axis > 0:
1521
- ecc = math.sqrt(1.0 - (minor_axis**2 / major_axis**2))
1522
- else:
1523
- ecc = 0.0
1524
-
1525
- ecc_values.append(ecc)
1885
+ continue
1886
+ (_, _), (a, b), _ = cv2.fitEllipse(c)
1887
+ if b > a:
1888
+ a, b = b, a
1889
+ e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a))) if a > 0 else 0.0
1890
+ ecc_values.append(e)
1526
1891
  star_count += 1
1527
1892
 
1528
- if star_count > 0:
1529
- avg_ecc = float(np.mean(ecc_values))
1530
- else:
1531
- avg_ecc = 0.0
1532
-
1893
+ # 8) fallback if too few
1894
+ if star_count < 5:
1895
+ k2 = max(3, (blur_size // 2) | 1)
1896
+ blurred2 = cv2.GaussianBlur(image_8u, (k2, k2), 0)
1897
+ sub2 = cv2.absdiff(image_8u, blurred2)
1898
+ otsu2, _ = cv2.threshold(sub2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
1899
+ thr2 = max(2, int(otsu2 * 0.6))
1900
+ _, mask2 = cv2.threshold(sub2, thr2, 255, cv2.THRESH_BINARY)
1901
+ contours2, _ = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1902
+
1903
+ star_count = 0
1904
+ ecc_values = []
1905
+ for c in contours2:
1906
+ area = cv2.contourArea(c)
1907
+ if area < 1 or area > max_area:
1908
+ continue
1909
+ if len(c) < 5:
1910
+ continue
1911
+ (_, _), (a, b), _ = cv2.fitEllipse(c)
1912
+ if b > a:
1913
+ a, b = b, a
1914
+ e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a))) if a > 0 else 0.0
1915
+ ecc_values.append(e)
1916
+ star_count += 1
1917
+
1918
+ avg_ecc = float(np.mean(ecc_values)) if star_count > 0 else 0.0
1533
1919
  return star_count, avg_ecc
1534
1920
 
1535
1921
  @njit(parallel=True, fastmath=True, cache=True)
@@ -1600,6 +1986,33 @@ def normalize_images(stack, ref_median):
1600
1986
 
1601
1987
 
1602
1988
  @njit(parallel=True, fastmath=True, cache=True)
1989
+ def _bilinear_interpolate_numba(out):
1990
+ H, W, C = out.shape
1991
+ for c in range(C):
1992
+ for y in prange(H):
1993
+ for x in range(W):
1994
+ if out[y, x, c] == 0:
1995
+ sumv = 0.0
1996
+ cnt = 0
1997
+ # 3x3 neighborhood average of non-zero samples (simple & fast)
1998
+ for dy in (-1, 0, 1):
1999
+ yy = y + dy
2000
+ if yy < 0 or yy >= H:
2001
+ continue
2002
+ for dx in (-1, 0, 1):
2003
+ xx = x + dx
2004
+ if xx < 0 or xx >= W:
2005
+ continue
2006
+ v = out[yy, xx, c]
2007
+ if v != 0:
2008
+ sumv += v
2009
+ cnt += 1
2010
+ if cnt > 0:
2011
+ out[y, x, c] = sumv / cnt
2012
+ return out
2013
+
2014
+
2015
+ @njit(parallel=True, fastmath=True)
1603
2016
  def _edge_aware_interpolate_numba(out):
1604
2017
  """
1605
2018
  For each pixel in out (shape: (H,W,3)) where out[y,x,c] == 0,
@@ -1696,137 +2109,99 @@ def _edge_aware_interpolate_numba(out):
1696
2109
  # and that we want a full (H,W,3) output.
1697
2110
 
1698
2111
  @njit(parallel=True, fastmath=True, cache=True)
1699
- def debayer_RGGB_fullres_fast(image):
1700
- """
1701
- For an RGGB pattern:
1702
- - Even rows: even cols = Red, odd cols = Green.
1703
- - Odd rows: even cols = Green, odd cols = Blue.
1704
- """
2112
+ def debayer_RGGB_fullres_fast(image, interpolate=True):
1705
2113
  H, W = image.shape
1706
2114
  out = np.zeros((H, W, 3), dtype=image.dtype)
1707
2115
  for y in prange(H):
1708
2116
  for x in range(W):
1709
2117
  if (y & 1) == 0:
1710
- if (x & 1) == 0:
1711
- # Even row, even col: Red
1712
- out[y, x, 0] = image[y, x]
1713
- else:
1714
- # Even row, odd col: Green
1715
- out[y, x, 1] = image[y, x]
2118
+ if (x & 1) == 0: out[y, x, 0] = image[y, x] # R
2119
+ else: out[y, x, 1] = image[y, x] # G
1716
2120
  else:
1717
- if (x & 1) == 0:
1718
- # Odd row, even col: Green
1719
- out[y, x, 1] = image[y, x]
1720
- else:
1721
- # Odd row, odd col: Blue
1722
- out[y, x, 2] = image[y, x]
1723
- _edge_aware_interpolate_numba(out)
2121
+ if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
2122
+ else: out[y, x, 2] = image[y, x] # B
2123
+ if interpolate:
2124
+ _edge_aware_interpolate_numba(out)
1724
2125
  return out
1725
2126
 
1726
2127
  @njit(parallel=True, fastmath=True, cache=True)
1727
- def debayer_BGGR_fullres_fast(image):
1728
- """
1729
- For a BGGR pattern:
1730
- - Even rows: even cols = Blue, odd cols = Green.
1731
- - Odd rows: even cols = Green, odd cols = Red.
1732
- """
2128
+ def debayer_BGGR_fullres_fast(image, interpolate=True):
1733
2129
  H, W = image.shape
1734
2130
  out = np.zeros((H, W, 3), dtype=image.dtype)
1735
2131
  for y in prange(H):
1736
2132
  for x in range(W):
1737
2133
  if (y & 1) == 0:
1738
- if (x & 1) == 0:
1739
- # Even row, even col: Blue
1740
- out[y, x, 2] = image[y, x]
1741
- else:
1742
- # Even row, odd col: Green
1743
- out[y, x, 1] = image[y, x]
2134
+ if (x & 1) == 0: out[y, x, 2] = image[y, x] # B
2135
+ else: out[y, x, 1] = image[y, x] # G
1744
2136
  else:
1745
- if (x & 1) == 0:
1746
- # Odd row, even col: Green
1747
- out[y, x, 1] = image[y, x]
1748
- else:
1749
- # Odd row, odd col: Red
1750
- out[y, x, 0] = image[y, x]
1751
- _edge_aware_interpolate_numba(out)
2137
+ if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
2138
+ else: out[y, x, 0] = image[y, x] # R
2139
+ if interpolate:
2140
+ _edge_aware_interpolate_numba(out)
1752
2141
  return out
1753
2142
 
1754
2143
  @njit(parallel=True, fastmath=True, cache=True)
1755
- def debayer_GRBG_fullres_fast(image):
1756
- """
1757
- For a GRBG pattern:
1758
- - Even rows: even cols = Green, odd cols = Red.
1759
- - Odd rows: even cols = Blue, odd cols = Green.
1760
- """
2144
+ def debayer_GRBG_fullres_fast(image, interpolate=True):
1761
2145
  H, W = image.shape
1762
2146
  out = np.zeros((H, W, 3), dtype=image.dtype)
1763
2147
  for y in prange(H):
1764
2148
  for x in range(W):
1765
2149
  if (y & 1) == 0:
1766
- if (x & 1) == 0:
1767
- # Even row, even col: Green
1768
- out[y, x, 1] = image[y, x]
1769
- else:
1770
- # Even row, odd col: Red
1771
- out[y, x, 0] = image[y, x]
2150
+ if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
2151
+ else: out[y, x, 0] = image[y, x] # R
1772
2152
  else:
1773
- if (x & 1) == 0:
1774
- # Odd row, even col: Blue
1775
- out[y, x, 2] = image[y, x]
1776
- else:
1777
- # Odd row, odd col: Green
1778
- out[y, x, 1] = image[y, x]
1779
- _edge_aware_interpolate_numba(out)
2153
+ if (x & 1) == 0: out[y, x, 2] = image[y, x] # B
2154
+ else: out[y, x, 1] = image[y, x] # G
2155
+ if interpolate:
2156
+ _edge_aware_interpolate_numba(out)
1780
2157
  return out
1781
2158
 
1782
2159
  @njit(parallel=True, fastmath=True, cache=True)
1783
- def debayer_GBRG_fullres_fast(image):
1784
- """
1785
- For a GBRG pattern:
1786
- - Even rows: even cols = Green, odd cols = Blue.
1787
- - Odd rows: even cols = Red, odd cols = Green.
1788
- """
2160
+ def debayer_GBRG_fullres_fast(image, interpolate=True):
1789
2161
  H, W = image.shape
1790
2162
  out = np.zeros((H, W, 3), dtype=image.dtype)
1791
2163
  for y in prange(H):
1792
2164
  for x in range(W):
1793
2165
  if (y & 1) == 0:
1794
- if (x & 1) == 0:
1795
- # Even row, even col: Green
1796
- out[y, x, 1] = image[y, x]
1797
- else:
1798
- # Even row, odd col: Blue
1799
- out[y, x, 2] = image[y, x]
2166
+ if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
2167
+ else: out[y, x, 2] = image[y, x] # B
1800
2168
  else:
1801
- if (x & 1) == 0:
1802
- # Odd row, even col: Red
1803
- out[y, x, 0] = image[y, x]
1804
- else:
1805
- # Odd row, odd col: Green
1806
- out[y, x, 1] = image[y, x]
1807
- _edge_aware_interpolate_numba(out)
2169
+ if (x & 1) == 0: out[y, x, 0] = image[y, x] # R
2170
+ else: out[y, x, 1] = image[y, x] # G
2171
+ if interpolate:
2172
+ _edge_aware_interpolate_numba(out)
1808
2173
  return out
1809
2174
 
1810
- # === Python-Level Dispatch Function ===
1811
- # Since Numba cannot easily compare strings in nopython mode,
1812
- # we do the if/elif check here in Python and then call the appropriate njit function.
1813
-
1814
- def debayer_fits_fast(image_data, bayer_pattern):
1815
- bp = bayer_pattern.upper()
1816
- if bp == 'RGGB':
1817
- return debayer_RGGB_fullres_fast(image_data)
1818
- elif bp == 'BGGR':
1819
- return debayer_BGGR_fullres_fast(image_data)
1820
- elif bp == 'GRBG':
1821
- return debayer_GRBG_fullres_fast(image_data)
1822
- elif bp == 'GBRG':
1823
- return debayer_GBRG_fullres_fast(image_data)
2175
+
2176
+ def debayer_fits_fast(image_data, bayer_pattern, cfa_drizzle=False, method="edge"):
2177
+ bp = (bayer_pattern or "").upper()
2178
+ interpolate = not cfa_drizzle
2179
+
2180
+ # 1) lay down samples; skip interpolate here so we can select method later
2181
+ if bp == "RGGB":
2182
+ out = debayer_RGGB_fullres_fast(image_data, interpolate=False)
2183
+ elif bp == "BGGR":
2184
+ out = debayer_BGGR_fullres_fast(image_data, interpolate=False)
2185
+ elif bp == "GRBG":
2186
+ out = debayer_GRBG_fullres_fast(image_data, interpolate=False)
2187
+ elif bp == "GBRG":
2188
+ out = debayer_GBRG_fullres_fast(image_data, interpolate=False)
1824
2189
  else:
1825
2190
  raise ValueError(f"Unsupported Bayer pattern: {bayer_pattern}")
1826
2191
 
1827
- def debayer_raw_fast(raw_image_data, bayer_pattern="RGGB"):
1828
- # For RAW images, use the same full-resolution demosaicing logic.
1829
- return debayer_fits_fast(raw_image_data, bayer_pattern)
2192
+ # 2) interpolate unless CFA drizzle
2193
+ if interpolate:
2194
+ m = (method or "edge").lower()
2195
+ if m == "bilinear":
2196
+ _bilinear_interpolate_numba(out)
2197
+ else:
2198
+ _edge_aware_interpolate_numba(out)
2199
+
2200
+ return out
2201
+
2202
+
2203
+ def debayer_raw_fast(raw_image_data, bayer_pattern="RGGB", cfa_drizzle=False, method="edge"):
2204
+ return debayer_fits_fast(raw_image_data, bayer_pattern, cfa_drizzle=cfa_drizzle, method=method)
1830
2205
 
1831
2206
 
1832
2207
  @njit(parallel=True, fastmath=True, cache=True)
@@ -1896,7 +2271,7 @@ def adjust_saturation_numba(image_array, saturation_factor):
1896
2271
 
1897
2272
  r, g, b = r + m, g + m, b + m # Add m to shift brightness
1898
2273
 
1899
- # ✅ Fix: Explicitly cast indices to integers
2274
+ # Fix: Explicitly cast indices to integers
1900
2275
  output[int(y), int(x), 0] = r
1901
2276
  output[int(y), int(x), 1] = g
1902
2277
  output[int(y), int(x), 2] = b
@@ -1916,7 +2291,7 @@ def applySCNR_numba(image_array):
1916
2291
  r, g, b = image_array[y, x]
1917
2292
  g = min(g, (r + b) / 2) # Reduce green to the average of red & blue
1918
2293
 
1919
- # ✅ Fix: Assign channels individually instead of a tuple
2294
+ # Fix: Assign channels individually instead of a tuple
1920
2295
  output[int(y), int(x), 0] = r
1921
2296
  output[int(y), int(x), 1] = g
1922
2297
  output[int(y), int(x), 2] = b
@@ -2214,123 +2589,193 @@ def hsv_to_rgb_numba(hsv):
2214
2589
  return out
2215
2590
 
2216
2591
@njit(parallel=True, fastmath=True, cache=True)
def _cosmetic_correction_core(src, dst, H, W, C,
                              hot_sigma, cold_sigma,
                              star_mean_ratio,   # e.g. 0.18..0.30
                              star_max_ratio,    # e.g. 0.45..0.65
                              sat_threshold,     # absolute cutoff in src units
                              cold_cluster_max   # max # of neighbors below low before we skip
                              ):
    """
    Star-safe local outlier correction. Reads from src, writes to dst.

    The center pixel is EXCLUDED from the ring statistics (median/MAD of the
    8 neighbors). Guards:
      - saturation: pixels at/above sat_threshold are copied through untouched;
      - star (hot): if the ring mean or ring max is a sizeable fraction of the
        center value, the spike is likely a PSF footprint, so it is kept;
      - cold cluster: if more than cold_cluster_max neighbors are also below
        the low threshold, the dip is structure/shadow, not a dead pixel.
    Border pixels (first/last row and column) are never written here.
    """
    for y in prange(1, H - 1):
        # Bugfix: scratch buffers must be allocated inside the prange body.
        # A single buffer hoisted above the parallel loop would be shared by
        # all worker threads and silently race on its contents.
        local_vals = np.empty(8, dtype=np.float32)
        abs_devs = np.empty(8, dtype=np.float32)
        for x in range(1, W - 1):
            for c in range(C if src.ndim == 3 else 1):
                # gather the 8-neighbor ring (center excluded)
                k = 0
                ring_sum = 0.0
                ring_max = -1e30
                for dy in (-1, 0, 1):
                    for dx in (-1, 0, 1):
                        if dy == 0 and dx == 0:
                            continue
                        if src.ndim == 3:
                            v = src[y + dy, x + dx, c]
                        else:
                            v = src[y + dy, x + dx]
                        local_vals[k] = v
                        ring_sum += v
                        if v > ring_max:
                            ring_max = v
                        k += 1

                # robust statistics from the ring only
                M = np.median(local_vals)
                for i in range(8):
                    abs_devs[i] = abs(local_vals[i] - M)
                MAD = np.median(abs_devs)
                sigma = 1.4826 * MAD + 1e-8  # epsilon guard against MAD == 0

                # center pixel
                T = src[y, x, c] if src.ndim == 3 else src[y, x]

                # saturation guard: never edit (near-)saturated pixels
                if T >= sat_threshold:
                    if src.ndim == 3:
                        dst[y, x, c] = T
                    else:
                        dst[y, x] = T
                    continue

                high = M + hot_sigma * sigma
                low = M - cold_sigma * sigma

                replace = False
                if T > high:
                    # HOT: skip if the neighbors form a star-like footprint
                    ring_mean = ring_sum / 8.0
                    if (ring_mean / (T + 1e-8) < star_mean_ratio) and (ring_max / (T + 1e-8) < star_max_ratio):
                        replace = True
                elif T < low:
                    # COLD: only replace a pixel that is isolated
                    count_below = 0
                    for i in range(8):
                        if local_vals[i] < low:
                            count_below += 1
                    if count_below <= cold_cluster_max:
                        replace = True

                if replace:
                    if src.ndim == 3:
                        dst[y, x, c] = M
                    else:
                        dst[y, x] = M
                else:
                    if src.ndim == 3:
                        dst[y, x, c] = T
                    else:
                        dst[y, x] = T
2259
2669
 
2260
- # **Apply correction ONLY if center pixel is an outlier**
2261
- if T > threshold_high or T < threshold_low:
2262
- if corrected.ndim == 3:
2263
- corrected[y, x, c_i] = M # Replace center pixel in color image
2264
- else:
2265
- corrected[y, x] = M # Replace center pixel in grayscale image
2266
2670
 
2267
- def bulk_cosmetic_correction_bayer(image, hot_sigma=5.0, cold_sigma=5.0):
2671
def bulk_cosmetic_correction_numba(image,
                                   hot_sigma=5.0,
                                   cold_sigma=5.0,
                                   star_mean_ratio=0.22,
                                   star_max_ratio=0.55,
                                   sat_quantile=0.9995):
    """
    Star-safe cosmetic correction for 2D (mono) or 3D (RGB) arrays.
    Two-pass: reads from the original plane, writes into a fresh copy.

    Parameters
    ----------
    star_mean_ratio : neighbor-mean / center ratio above which a hot pixel is
        assumed to sit inside a PSF and is left alone.
    star_max_ratio : neighbor-max / center ratio with the same meaning.
    sat_quantile : top quantile of each channel protected from edits
        (bright star cores).
    """
    img = image.astype(np.float32, copy=False)
    was_gray = img.ndim == 2
    src = img[:, :, None] if was_gray else img

    H, W, C = src.shape
    dst = src.copy()

    for ci in range(C):
        plane = src[:, :, ci]
        # Per-channel saturation guard; the quantile is computed here in
        # Python because Numba does not support np.quantile well.
        sat = float(np.quantile(plane, sat_quantile))
        _cosmetic_correction_core(plane, dst[:, :, ci],
                                  H, W, 1,
                                  float(hot_sigma), float(cold_sigma),
                                  float(star_mean_ratio), float(star_max_ratio),
                                  sat,
                                  1)  # cold_cluster_max: tolerate one low neighbor

    return dst[:, :, 0] if was_gray else dst
2305
2713
 
2306
- def bulk_cosmetic_correction_numba(image, hot_sigma=3.0, cold_sigma=3.0, window_size=3):
2714
+
2715
def bulk_cosmetic_correction_bayer(image,
                                   hot_sigma=5.5,
                                   cold_sigma=5.0,
                                   star_mean_ratio=0.22,
                                   star_max_ratio=0.55,
                                   sat_quantile=0.9995,
                                   pattern="RGGB"):
    """
    Bayer-safe cosmetic correction.

    Each same-color sub-plane (2-px stride) is corrected independently so
    neighboring CFA colors never contaminate each other's statistics, then
    the results are written back in place. Defaults assume normalized or
    16-bit/32f data.
    """
    H, W = image.shape
    corrected = image.astype(np.float32).copy()

    # (row, col) offsets of the R, G1, G2 and B sites for each CFA layout
    cfa_offsets = {
        "RGGB": ((0, 0), (0, 1), (1, 0), (1, 1)),
        "BGGR": ((1, 1), (1, 0), (0, 1), (0, 0)),
        "GRBG": ((0, 1), (0, 0), (1, 1), (1, 0)),
        "GBRG": ((1, 0), (0, 0), (1, 1), (0, 1)),
    }
    key = pattern.upper()
    if key not in cfa_offsets:
        key = "RGGB"  # unknown pattern: fall back to the most common layout

    # The four strided views are disjoint, so processing order is irrelevant.
    for dy, dx in cfa_offsets[key]:
        plane = corrected[dy:H:2, dx:W:2]
        corrected[dy:H:2, dx:W:2] = bulk_cosmetic_correction_numba(
            plane,
            hot_sigma=hot_sigma,
            cold_sigma=cold_sigma,
            star_mean_ratio=star_mean_ratio,
            star_max_ratio=star_max_ratio,
            sat_quantile=sat_quantile,
        )

    return corrected
2336
2781
 
@@ -2803,8 +3248,245 @@ def drizzle_deposit_numba_footprint(
2803
3248
 
2804
3249
  return drizzle_buffer, coverage_buffer
2805
3250
 
3251
@njit(fastmath=True)
def _drizzle_kernel_weights(kernel_code: int, Xo: float, Yo: float,
                            min_x: int, max_x: int, min_y: int, max_y: int,
                            sigma_out: float,
                            weights_out):  # preallocated (max_y-min_y+1, max_x-min_x+1) view
    """
    Fill weights_out with unnormalized drop-kernel weights centered at (Xo, Yo).

    kernel_code: 0 = square (uniform over the bounding box), 1 = circle
    (uniform inside radius sigma_out), anything else = gaussian with
    std-dev sigma_out, truncated at ~3 sigma for speed.

    Returns (sum_w, count_of_nonzero_weights).
    """
    n_rows = max_y - min_y + 1
    n_cols = max_x - min_x + 1
    radius_sq = sigma_out * sigma_out  # circle mode reuses sigma_out as radius

    total = 0.0
    used = 0
    for row in range(n_rows):
        dy = (min_y + row + 0.5) - Yo  # distance from output-pixel center
        for col in range(n_cols):
            dx = (min_x + col + 0.5) - Xo
            w = 0.0
            if kernel_code == 0:
                # square: every pixel in the bounding box counts equally
                w = 1.0
            elif kernel_code == 1:
                # circle: uniform weight only inside the radius
                if dx * dx + dy * dy <= radius_sq:
                    w = 1.0
            else:
                # gaussian centered at (Xo, Yo); drop tiny far contributions
                z = (dx * dx + dy * dy) / (2.0 * sigma_out * sigma_out)
                if z <= 9.0:  # ~3σ
                    w = math.exp(-z)

            weights_out[row, col] = w
            total += w
            if w > 0.0:
                used += 1

    return total, used
3294
+
3295
+
3296
@njit(fastmath=True)
def drizzle_deposit_numba_kernel_mono(
    img_data, transform, drizzle_buffer, coverage_buffer,
    drizzle_factor: float, drop_shrink: float, frame_weight: float,
    kernel_code: int, gaussian_sigma_or_radius: float
):
    """
    Deposit one mono frame into the drizzle accumulators using a square,
    circular, or gaussian drop kernel. Returns the (mutated) buffers.
    Zero-valued input pixels are skipped; drops falling entirely outside the
    output grid are discarded; degenerate/empty kernels fall back to a
    nearest-pixel deposit.
    """
    in_h, in_w = img_data.shape
    out_h, out_w = drizzle_buffer.shape

    # promote the 2x3 affine transform to homogeneous 3x3 form
    A = np.zeros((3, 3), dtype=np.float32)
    A[0, 0], A[0, 1], A[0, 2] = transform[0, 0], transform[0, 1], transform[0, 2]
    A[1, 0], A[1, 1], A[1, 2] = transform[1, 0], transform[1, 1], transform[1, 2]
    A[2, 2] = 1.0

    hom = np.zeros(3, dtype=np.float32)
    hom[2] = 1.0

    # width parameter:
    #   square/circle: radius = drop_shrink * 0.5 (pixfrac-like)
    #   gaussian:      sigma  = gaussian_sigma_or_radius
    radius = drop_shrink * 0.5
    sigma_out = gaussian_sigma_or_radius if kernel_code == 2 else radius
    if sigma_out < 1e-6:
        sigma_out = 1e-6

    for y in range(in_h):
        for x in range(in_w):
            val = img_data[y, x]
            if val == 0.0:
                continue  # zero pixels carry no signal

            hom[0] = x
            hom[1] = y
            mapped = A @ hom
            Xo = mapped[0] * drizzle_factor
            Yo = mapped[1] * drizzle_factor

            # kernel support half-width in output pixels
            if kernel_code == 2:
                r = int(math.ceil(3.0 * sigma_out))
            else:
                r = int(math.ceil(radius))

            if r <= 0:
                # degenerate drop -> nearest-pixel deposit
                ox = int(Xo)
                oy = int(Yo)
                if 0 <= ox < out_w and 0 <= oy < out_h:
                    drizzle_buffer[oy, ox] += val * frame_weight
                    coverage_buffer[oy, ox] += frame_weight
                continue

            x0 = int(math.floor(Xo - r))
            x1 = int(math.floor(Xo + r))
            y0 = int(math.floor(Yo - r))
            y1 = int(math.floor(Yo + r))
            if x1 < 0 or x0 >= out_w or y1 < 0 or y0 >= out_h:
                continue  # entirely off the output grid
            if x0 < 0:
                x0 = 0
            if y0 < 0:
                y0 = 0
            if x1 >= out_w:
                x1 = out_w - 1
            if y1 >= out_h:
                y1 = out_h - 1

            tile_h = y1 - y0 + 1
            tile_w = x1 - x0 + 1
            if tile_h <= 0 or tile_w <= 0:
                continue

            # small per-drop weight tile
            weights = np.zeros((tile_h, tile_w), dtype=np.float32)
            sum_w, used = _drizzle_kernel_weights(kernel_code, Xo, Yo,
                                                  x0, x1, y0, y1,
                                                  sigma_out, weights)
            if used == 0 or sum_w <= 1e-12:
                # every weight truncated away -> nearest-pixel fallback
                ox = int(Xo)
                oy = int(Yo)
                if 0 <= ox < out_w and 0 <= oy < out_h:
                    drizzle_buffer[oy, ox] += val * frame_weight
                    coverage_buffer[oy, ox] += frame_weight
                continue

            # normalize so each drop deposits exactly val * frame_weight
            flux_scale = (val * frame_weight) / sum_w
            cov_scale = frame_weight / sum_w
            for j in range(tile_h):
                oy = y0 + j
                for i in range(tile_w):
                    w = weights[j, i]
                    if w > 0.0:
                        ox = x0 + i
                        drizzle_buffer[oy, ox] += w * flux_scale
                        coverage_buffer[oy, ox] += w * cov_scale

    return drizzle_buffer, coverage_buffer
3388
+
3389
+
3390
@njit(fastmath=True)
def drizzle_deposit_color_kernel(
    img_data, transform, drizzle_buffer, coverage_buffer,
    drizzle_factor: float, drop_shrink: float, frame_weight: float,
    kernel_code: int, gaussian_sigma_or_radius: float
):
    """
    Deposit one color frame into the drizzle accumulators using a square,
    circular, or gaussian drop kernel. The kernel footprint is computed once
    per pixel and shared by all channels; zero-valued channels deposit
    nothing. Returns the (mutated) buffers.
    """
    in_h, in_w, n_chan = img_data.shape
    out_h, out_w, _ = drizzle_buffer.shape

    # promote the 2x3 affine transform to homogeneous 3x3 form
    A = np.zeros((3, 3), dtype=np.float32)
    A[0, 0], A[0, 1], A[0, 2] = transform[0, 0], transform[0, 1], transform[0, 2]
    A[1, 0], A[1, 1], A[1, 2] = transform[1, 0], transform[1, 1], transform[1, 2]
    A[2, 2] = 1.0

    hom = np.zeros(3, dtype=np.float32)
    hom[2] = 1.0

    radius = drop_shrink * 0.5
    sigma_out = gaussian_sigma_or_radius if kernel_code == 2 else radius
    if sigma_out < 1e-6:
        sigma_out = 1e-6

    for y in range(in_h):
        for x in range(in_w):
            # skip pixels whose channels are all zero (minor optimization)
            has_signal = False
            for cc in range(n_chan):
                if img_data[y, x, cc] != 0.0:
                    has_signal = True
                    break
            if not has_signal:
                continue

            hom[0] = x
            hom[1] = y
            mapped = A @ hom
            Xo = mapped[0] * drizzle_factor
            Yo = mapped[1] * drizzle_factor

            if kernel_code == 2:
                r = int(math.ceil(3.0 * sigma_out))
            else:
                r = int(math.ceil(radius))

            if r <= 0:
                # degenerate drop -> nearest-pixel deposit per channel
                ox = int(Xo)
                oy = int(Yo)
                if 0 <= ox < out_w and 0 <= oy < out_h:
                    for c in range(n_chan):
                        val = img_data[y, x, c]
                        if val != 0.0:
                            drizzle_buffer[oy, ox, c] += val * frame_weight
                            coverage_buffer[oy, ox, c] += frame_weight
                continue

            x0 = int(math.floor(Xo - r))
            x1 = int(math.floor(Xo + r))
            y0 = int(math.floor(Yo - r))
            y1 = int(math.floor(Yo + r))
            if x1 < 0 or x0 >= out_w or y1 < 0 or y0 >= out_h:
                continue  # entirely off the output grid
            if x0 < 0:
                x0 = 0
            if y0 < 0:
                y0 = 0
            if x1 >= out_w:
                x1 = out_w - 1
            if y1 >= out_h:
                y1 = out_h - 1

            tile_h = y1 - y0 + 1
            tile_w = x1 - x0 + 1
            if tile_h <= 0 or tile_w <= 0:
                continue

            weights = np.zeros((tile_h, tile_w), dtype=np.float32)
            sum_w, used = _drizzle_kernel_weights(kernel_code, Xo, Yo,
                                                  x0, x1, y0, y1,
                                                  sigma_out, weights)
            if used == 0 or sum_w <= 1e-12:
                # every weight truncated away -> nearest-pixel fallback
                ox = int(Xo)
                oy = int(Yo)
                if 0 <= ox < out_w and 0 <= oy < out_h:
                    for c in range(n_chan):
                        val = img_data[y, x, c]
                        if val != 0.0:
                            drizzle_buffer[oy, ox, c] += val * frame_weight
                            coverage_buffer[oy, ox, c] += frame_weight
                continue

            inv_sum = 1.0 / sum_w
            for c in range(n_chan):
                val = img_data[y, x, c]
                if val == 0.0:
                    continue
                flux_scale = (val * frame_weight) * inv_sum
                cov_scale = frame_weight * inv_sum
                for j in range(tile_h):
                    oy = y0 + j
                    for i in range(tile_w):
                        w = weights[j, i]
                        if w > 0.0:
                            ox = x0 + i
                            drizzle_buffer[oy, ox, c] += w * flux_scale
                            coverage_buffer[oy, ox, c] += w * cov_scale

    return drizzle_buffer, coverage_buffer
3488
+
3489
+ @njit(parallel=True)
2808
3490
  def finalize_drizzle_2d(drizzle_buffer, coverage_buffer, final_out):
2809
3491
  """
2810
3492
  parallel-friendly final step: final_out = drizzle_buffer / coverage_buffer,
@@ -3085,6 +3767,52 @@ def fast_star_detect(image,
3085
3767
  return np.array(star_positions, dtype=np.float32)
3086
3768
 
3087
3769
 
3770
@njit(fastmath=True)
def _drizzle_kernel_weights(kernel_code: int, Xo: float, Yo: float,
                            min_x: int, max_x: int, min_y: int, max_y: int,
                            sigma_out: float,
                            weights_out):  # preallocated (max_y-min_y+1, max_x-min_x+1) view
    """
    Fill weights_out with unnormalized drop-kernel weights centered at (Xo, Yo).

    kernel_code: 0 = square (uniform over the bounding box), 1 = circle
    (uniform inside radius sigma_out), anything else = gaussian with
    std-dev sigma_out, truncated at ~3 sigma for speed.

    Returns (sum_w, count_of_nonzero_weights).

    NOTE(review): this is a byte-for-byte duplicate of the
    `_drizzle_kernel_weights` defined earlier in this module; the later
    definition silently shadows the earlier one. Consider removing one copy.
    """
    n_rows = max_y - min_y + 1
    n_cols = max_x - min_x + 1
    radius_sq = sigma_out * sigma_out  # circle mode reuses sigma_out as radius

    total = 0.0
    used = 0
    for row in range(n_rows):
        dy = (min_y + row + 0.5) - Yo  # distance from output-pixel center
        for col in range(n_cols):
            dx = (min_x + col + 0.5) - Xo
            w = 0.0
            if kernel_code == 0:
                # square: every pixel in the bounding box counts equally
                w = 1.0
            elif kernel_code == 1:
                # circle: uniform weight only inside the radius
                if dx * dx + dy * dy <= radius_sq:
                    w = 1.0
            else:
                # gaussian centered at (Xo, Yo); drop tiny far contributions
                z = (dx * dx + dy * dy) / (2.0 * sigma_out * sigma_out)
                if z <= 9.0:  # ~3σ
                    w = math.exp(-z)

            weights_out[row, col] = w
            total += w
            if w > 0.0:
                used += 1

    return total, used
3813
+
3814
+
3815
+
3088
3816
  @njit(fastmath=True, cache=True)
3089
3817
  def gradient_descent_to_dim_spot_numba(gray_small, start_x, start_y, patch_size):
3090
3818
  """