setiastrosuitepro-1.6.1-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of setiastrosuitepro has been flagged as potentially problematic.

Files changed (342)
  1. setiastro/__init__.py +2 -0
  2. setiastro/data/SASP_data.fits +0 -0
  3. setiastro/data/catalogs/List_of_Galaxies_with_Distances_Gly.csv +488 -0
  4. setiastro/data/catalogs/astrobin_filters.csv +890 -0
  5. setiastro/data/catalogs/astrobin_filters_page1_local.csv +51 -0
  6. setiastro/data/catalogs/cali2.csv +63 -0
  7. setiastro/data/catalogs/cali2color.csv +65 -0
  8. setiastro/data/catalogs/celestial_catalog - original.csv +16471 -0
  9. setiastro/data/catalogs/celestial_catalog.csv +24031 -0
  10. setiastro/data/catalogs/detected_stars.csv +24784 -0
  11. setiastro/data/catalogs/fits_header_data.csv +46 -0
  12. setiastro/data/catalogs/test.csv +8 -0
  13. setiastro/data/catalogs/updated_celestial_catalog.csv +16471 -0
  14. setiastro/images/Astro_Spikes.png +0 -0
  15. setiastro/images/HRDiagram.png +0 -0
  16. setiastro/images/LExtract.png +0 -0
  17. setiastro/images/LInsert.png +0 -0
  18. setiastro/images/Oxygenation-atm-2.svg.png +0 -0
  19. setiastro/images/RGB080604.png +0 -0
  20. setiastro/images/abeicon.png +0 -0
  21. setiastro/images/aberration.png +0 -0
  22. setiastro/images/andromedatry.png +0 -0
  23. setiastro/images/andromedatry_satellited.png +0 -0
  24. setiastro/images/annotated.png +0 -0
  25. setiastro/images/aperture.png +0 -0
  26. setiastro/images/astrosuite.ico +0 -0
  27. setiastro/images/astrosuite.png +0 -0
  28. setiastro/images/astrosuitepro.icns +0 -0
  29. setiastro/images/astrosuitepro.ico +0 -0
  30. setiastro/images/astrosuitepro.png +0 -0
  31. setiastro/images/background.png +0 -0
  32. setiastro/images/background2.png +0 -0
  33. setiastro/images/benchmark.png +0 -0
  34. setiastro/images/big_moon_stabilizer_timeline.png +0 -0
  35. setiastro/images/big_moon_stabilizer_timeline_clean.png +0 -0
  36. setiastro/images/blaster.png +0 -0
  37. setiastro/images/blink.png +0 -0
  38. setiastro/images/clahe.png +0 -0
  39. setiastro/images/collage.png +0 -0
  40. setiastro/images/colorwheel.png +0 -0
  41. setiastro/images/contsub.png +0 -0
  42. setiastro/images/convo.png +0 -0
  43. setiastro/images/copyslot.png +0 -0
  44. setiastro/images/cosmic.png +0 -0
  45. setiastro/images/cosmicsat.png +0 -0
  46. setiastro/images/crop1.png +0 -0
  47. setiastro/images/cropicon.png +0 -0
  48. setiastro/images/curves.png +0 -0
  49. setiastro/images/cvs.png +0 -0
  50. setiastro/images/debayer.png +0 -0
  51. setiastro/images/denoise_cnn_custom.png +0 -0
  52. setiastro/images/denoise_cnn_graph.png +0 -0
  53. setiastro/images/disk.png +0 -0
  54. setiastro/images/dse.png +0 -0
  55. setiastro/images/exoicon.png +0 -0
  56. setiastro/images/eye.png +0 -0
  57. setiastro/images/fliphorizontal.png +0 -0
  58. setiastro/images/flipvertical.png +0 -0
  59. setiastro/images/font.png +0 -0
  60. setiastro/images/freqsep.png +0 -0
  61. setiastro/images/functionbundle.png +0 -0
  62. setiastro/images/graxpert.png +0 -0
  63. setiastro/images/green.png +0 -0
  64. setiastro/images/gridicon.png +0 -0
  65. setiastro/images/halo.png +0 -0
  66. setiastro/images/hdr.png +0 -0
  67. setiastro/images/histogram.png +0 -0
  68. setiastro/images/hubble.png +0 -0
  69. setiastro/images/imagecombine.png +0 -0
  70. setiastro/images/invert.png +0 -0
  71. setiastro/images/isophote.png +0 -0
  72. setiastro/images/isophote_demo_figure.png +0 -0
  73. setiastro/images/isophote_demo_image.png +0 -0
  74. setiastro/images/isophote_demo_model.png +0 -0
  75. setiastro/images/isophote_demo_residual.png +0 -0
  76. setiastro/images/jwstpupil.png +0 -0
  77. setiastro/images/linearfit.png +0 -0
  78. setiastro/images/livestacking.png +0 -0
  79. setiastro/images/mask.png +0 -0
  80. setiastro/images/maskapply.png +0 -0
  81. setiastro/images/maskcreate.png +0 -0
  82. setiastro/images/maskremove.png +0 -0
  83. setiastro/images/morpho.png +0 -0
  84. setiastro/images/mosaic.png +0 -0
  85. setiastro/images/multiscale_decomp.png +0 -0
  86. setiastro/images/nbtorgb.png +0 -0
  87. setiastro/images/neutral.png +0 -0
  88. setiastro/images/nuke.png +0 -0
  89. setiastro/images/openfile.png +0 -0
  90. setiastro/images/pedestal.png +0 -0
  91. setiastro/images/pen.png +0 -0
  92. setiastro/images/pixelmath.png +0 -0
  93. setiastro/images/platesolve.png +0 -0
  94. setiastro/images/ppp.png +0 -0
  95. setiastro/images/pro.png +0 -0
  96. setiastro/images/project.png +0 -0
  97. setiastro/images/psf.png +0 -0
  98. setiastro/images/redo.png +0 -0
  99. setiastro/images/redoicon.png +0 -0
  100. setiastro/images/rescale.png +0 -0
  101. setiastro/images/rgbalign.png +0 -0
  102. setiastro/images/rgbcombo.png +0 -0
  103. setiastro/images/rgbextract.png +0 -0
  104. setiastro/images/rotate180.png +0 -0
  105. setiastro/images/rotateclockwise.png +0 -0
  106. setiastro/images/rotatecounterclockwise.png +0 -0
  107. setiastro/images/satellite.png +0 -0
  108. setiastro/images/script.png +0 -0
  109. setiastro/images/selectivecolor.png +0 -0
  110. setiastro/images/simbad.png +0 -0
  111. setiastro/images/slot0.png +0 -0
  112. setiastro/images/slot1.png +0 -0
  113. setiastro/images/slot2.png +0 -0
  114. setiastro/images/slot3.png +0 -0
  115. setiastro/images/slot4.png +0 -0
  116. setiastro/images/slot5.png +0 -0
  117. setiastro/images/slot6.png +0 -0
  118. setiastro/images/slot7.png +0 -0
  119. setiastro/images/slot8.png +0 -0
  120. setiastro/images/slot9.png +0 -0
  121. setiastro/images/spcc.png +0 -0
  122. setiastro/images/spin_precession_vs_lunar_distance.png +0 -0
  123. setiastro/images/spinner.gif +0 -0
  124. setiastro/images/stacking.png +0 -0
  125. setiastro/images/staradd.png +0 -0
  126. setiastro/images/staralign.png +0 -0
  127. setiastro/images/starnet.png +0 -0
  128. setiastro/images/starregistration.png +0 -0
  129. setiastro/images/starspike.png +0 -0
  130. setiastro/images/starstretch.png +0 -0
  131. setiastro/images/statstretch.png +0 -0
  132. setiastro/images/supernova.png +0 -0
  133. setiastro/images/uhs.png +0 -0
  134. setiastro/images/undoicon.png +0 -0
  135. setiastro/images/upscale.png +0 -0
  136. setiastro/images/viewbundle.png +0 -0
  137. setiastro/images/whitebalance.png +0 -0
  138. setiastro/images/wimi_icon_256x256.png +0 -0
  139. setiastro/images/wimilogo.png +0 -0
  140. setiastro/images/wims.png +0 -0
  141. setiastro/images/wrench_icon.png +0 -0
  142. setiastro/images/xisfliberator.png +0 -0
  143. setiastro/saspro/__init__.py +20 -0
  144. setiastro/saspro/__main__.py +809 -0
  145. setiastro/saspro/_generated/__init__.py +7 -0
  146. setiastro/saspro/_generated/build_info.py +2 -0
  147. setiastro/saspro/abe.py +1295 -0
  148. setiastro/saspro/abe_preset.py +196 -0
  149. setiastro/saspro/aberration_ai.py +694 -0
  150. setiastro/saspro/aberration_ai_preset.py +224 -0
  151. setiastro/saspro/accel_installer.py +218 -0
  152. setiastro/saspro/accel_workers.py +30 -0
  153. setiastro/saspro/add_stars.py +621 -0
  154. setiastro/saspro/astrobin_exporter.py +1007 -0
  155. setiastro/saspro/astrospike.py +153 -0
  156. setiastro/saspro/astrospike_python.py +1839 -0
  157. setiastro/saspro/autostretch.py +196 -0
  158. setiastro/saspro/backgroundneutral.py +560 -0
  159. setiastro/saspro/batch_convert.py +325 -0
  160. setiastro/saspro/batch_renamer.py +519 -0
  161. setiastro/saspro/blemish_blaster.py +488 -0
  162. setiastro/saspro/blink_comparator_pro.py +2926 -0
  163. setiastro/saspro/bundles.py +61 -0
  164. setiastro/saspro/bundles_dock.py +114 -0
  165. setiastro/saspro/cheat_sheet.py +178 -0
  166. setiastro/saspro/clahe.py +342 -0
  167. setiastro/saspro/comet_stacking.py +1377 -0
  168. setiastro/saspro/common_tr.py +107 -0
  169. setiastro/saspro/config.py +38 -0
  170. setiastro/saspro/config_bootstrap.py +40 -0
  171. setiastro/saspro/config_manager.py +316 -0
  172. setiastro/saspro/continuum_subtract.py +1617 -0
  173. setiastro/saspro/convo.py +1397 -0
  174. setiastro/saspro/convo_preset.py +414 -0
  175. setiastro/saspro/copyastro.py +187 -0
  176. setiastro/saspro/cosmicclarity.py +1564 -0
  177. setiastro/saspro/cosmicclarity_preset.py +407 -0
  178. setiastro/saspro/crop_dialog_pro.py +956 -0
  179. setiastro/saspro/crop_preset.py +189 -0
  180. setiastro/saspro/curve_editor_pro.py +2544 -0
  181. setiastro/saspro/curves_preset.py +375 -0
  182. setiastro/saspro/debayer.py +670 -0
  183. setiastro/saspro/debug_utils.py +29 -0
  184. setiastro/saspro/dnd_mime.py +35 -0
  185. setiastro/saspro/doc_manager.py +2641 -0
  186. setiastro/saspro/exoplanet_detector.py +2166 -0
  187. setiastro/saspro/file_utils.py +284 -0
  188. setiastro/saspro/fitsmodifier.py +745 -0
  189. setiastro/saspro/fix_bom.py +32 -0
  190. setiastro/saspro/free_torch_memory.py +48 -0
  191. setiastro/saspro/frequency_separation.py +1343 -0
  192. setiastro/saspro/function_bundle.py +1594 -0
  193. setiastro/saspro/generate_translations.py +2378 -0
  194. setiastro/saspro/ghs_dialog_pro.py +660 -0
  195. setiastro/saspro/ghs_preset.py +284 -0
  196. setiastro/saspro/graxpert.py +634 -0
  197. setiastro/saspro/graxpert_preset.py +287 -0
  198. setiastro/saspro/gui/__init__.py +0 -0
  199. setiastro/saspro/gui/main_window.py +8567 -0
  200. setiastro/saspro/gui/mixins/__init__.py +33 -0
  201. setiastro/saspro/gui/mixins/dock_mixin.py +263 -0
  202. setiastro/saspro/gui/mixins/file_mixin.py +443 -0
  203. setiastro/saspro/gui/mixins/geometry_mixin.py +403 -0
  204. setiastro/saspro/gui/mixins/header_mixin.py +441 -0
  205. setiastro/saspro/gui/mixins/mask_mixin.py +421 -0
  206. setiastro/saspro/gui/mixins/menu_mixin.py +361 -0
  207. setiastro/saspro/gui/mixins/theme_mixin.py +367 -0
  208. setiastro/saspro/gui/mixins/toolbar_mixin.py +1457 -0
  209. setiastro/saspro/gui/mixins/update_mixin.py +309 -0
  210. setiastro/saspro/gui/mixins/view_mixin.py +435 -0
  211. setiastro/saspro/halobgon.py +462 -0
  212. setiastro/saspro/header_viewer.py +448 -0
  213. setiastro/saspro/headless_utils.py +88 -0
  214. setiastro/saspro/histogram.py +753 -0
  215. setiastro/saspro/history_explorer.py +939 -0
  216. setiastro/saspro/i18n.py +156 -0
  217. setiastro/saspro/image_combine.py +414 -0
  218. setiastro/saspro/image_peeker_pro.py +1601 -0
  219. setiastro/saspro/imageops/__init__.py +37 -0
  220. setiastro/saspro/imageops/mdi_snap.py +292 -0
  221. setiastro/saspro/imageops/scnr.py +36 -0
  222. setiastro/saspro/imageops/starbasedwhitebalance.py +210 -0
  223. setiastro/saspro/imageops/stretch.py +244 -0
  224. setiastro/saspro/isophote.py +1179 -0
  225. setiastro/saspro/layers.py +208 -0
  226. setiastro/saspro/layers_dock.py +714 -0
  227. setiastro/saspro/lazy_imports.py +193 -0
  228. setiastro/saspro/legacy/__init__.py +2 -0
  229. setiastro/saspro/legacy/image_manager.py +2226 -0
  230. setiastro/saspro/legacy/numba_utils.py +3659 -0
  231. setiastro/saspro/legacy/xisf.py +1071 -0
  232. setiastro/saspro/linear_fit.py +534 -0
  233. setiastro/saspro/live_stacking.py +1830 -0
  234. setiastro/saspro/log_bus.py +5 -0
  235. setiastro/saspro/logging_config.py +460 -0
  236. setiastro/saspro/luminancerecombine.py +309 -0
  237. setiastro/saspro/main_helpers.py +201 -0
  238. setiastro/saspro/mask_creation.py +928 -0
  239. setiastro/saspro/masks_core.py +56 -0
  240. setiastro/saspro/mdi_widgets.py +353 -0
  241. setiastro/saspro/memory_utils.py +666 -0
  242. setiastro/saspro/metadata_patcher.py +75 -0
  243. setiastro/saspro/mfdeconv.py +3826 -0
  244. setiastro/saspro/mfdeconv_earlystop.py +71 -0
  245. setiastro/saspro/mfdeconvcudnn.py +3263 -0
  246. setiastro/saspro/mfdeconvsport.py +2382 -0
  247. setiastro/saspro/minorbodycatalog.py +567 -0
  248. setiastro/saspro/morphology.py +382 -0
  249. setiastro/saspro/multiscale_decomp.py +1290 -0
  250. setiastro/saspro/nbtorgb_stars.py +531 -0
  251. setiastro/saspro/numba_utils.py +3044 -0
  252. setiastro/saspro/numba_warmup.py +141 -0
  253. setiastro/saspro/ops/__init__.py +9 -0
  254. setiastro/saspro/ops/command_help_dialog.py +623 -0
  255. setiastro/saspro/ops/command_runner.py +217 -0
  256. setiastro/saspro/ops/commands.py +1594 -0
  257. setiastro/saspro/ops/script_editor.py +1102 -0
  258. setiastro/saspro/ops/scripts.py +1413 -0
  259. setiastro/saspro/ops/settings.py +679 -0
  260. setiastro/saspro/parallel_utils.py +554 -0
  261. setiastro/saspro/pedestal.py +121 -0
  262. setiastro/saspro/perfect_palette_picker.py +1070 -0
  263. setiastro/saspro/pipeline.py +110 -0
  264. setiastro/saspro/pixelmath.py +1600 -0
  265. setiastro/saspro/plate_solver.py +2444 -0
  266. setiastro/saspro/project_io.py +797 -0
  267. setiastro/saspro/psf_utils.py +136 -0
  268. setiastro/saspro/psf_viewer.py +549 -0
  269. setiastro/saspro/pyi_rthook_astroquery.py +95 -0
  270. setiastro/saspro/remove_green.py +314 -0
  271. setiastro/saspro/remove_stars.py +1625 -0
  272. setiastro/saspro/remove_stars_preset.py +404 -0
  273. setiastro/saspro/resources.py +477 -0
  274. setiastro/saspro/rgb_combination.py +207 -0
  275. setiastro/saspro/rgb_extract.py +19 -0
  276. setiastro/saspro/rgbalign.py +723 -0
  277. setiastro/saspro/runtime_imports.py +7 -0
  278. setiastro/saspro/runtime_torch.py +754 -0
  279. setiastro/saspro/save_options.py +72 -0
  280. setiastro/saspro/selective_color.py +1552 -0
  281. setiastro/saspro/sfcc.py +1430 -0
  282. setiastro/saspro/shortcuts.py +3043 -0
  283. setiastro/saspro/signature_insert.py +1099 -0
  284. setiastro/saspro/stacking_suite.py +18181 -0
  285. setiastro/saspro/star_alignment.py +7420 -0
  286. setiastro/saspro/star_alignment_preset.py +329 -0
  287. setiastro/saspro/star_metrics.py +49 -0
  288. setiastro/saspro/star_spikes.py +681 -0
  289. setiastro/saspro/star_stretch.py +470 -0
  290. setiastro/saspro/stat_stretch.py +506 -0
  291. setiastro/saspro/status_log_dock.py +78 -0
  292. setiastro/saspro/subwindow.py +3267 -0
  293. setiastro/saspro/supernovaasteroidhunter.py +1716 -0
  294. setiastro/saspro/swap_manager.py +99 -0
  295. setiastro/saspro/torch_backend.py +89 -0
  296. setiastro/saspro/torch_rejection.py +434 -0
  297. setiastro/saspro/translations/de_translations.py +3733 -0
  298. setiastro/saspro/translations/es_translations.py +3923 -0
  299. setiastro/saspro/translations/fr_translations.py +3842 -0
  300. setiastro/saspro/translations/integrate_translations.py +234 -0
  301. setiastro/saspro/translations/it_translations.py +3662 -0
  302. setiastro/saspro/translations/ja_translations.py +3585 -0
  303. setiastro/saspro/translations/pt_translations.py +3853 -0
  304. setiastro/saspro/translations/saspro_de.qm +0 -0
  305. setiastro/saspro/translations/saspro_de.ts +253 -0
  306. setiastro/saspro/translations/saspro_es.qm +0 -0
  307. setiastro/saspro/translations/saspro_es.ts +12520 -0
  308. setiastro/saspro/translations/saspro_fr.qm +0 -0
  309. setiastro/saspro/translations/saspro_fr.ts +12514 -0
  310. setiastro/saspro/translations/saspro_it.qm +0 -0
  311. setiastro/saspro/translations/saspro_it.ts +12520 -0
  312. setiastro/saspro/translations/saspro_ja.qm +0 -0
  313. setiastro/saspro/translations/saspro_ja.ts +257 -0
  314. setiastro/saspro/translations/saspro_pt.qm +0 -0
  315. setiastro/saspro/translations/saspro_pt.ts +257 -0
  316. setiastro/saspro/translations/saspro_zh.qm +0 -0
  317. setiastro/saspro/translations/saspro_zh.ts +12520 -0
  318. setiastro/saspro/translations/zh_translations.py +3659 -0
  319. setiastro/saspro/versioning.py +71 -0
  320. setiastro/saspro/view_bundle.py +1555 -0
  321. setiastro/saspro/wavescale_hdr.py +624 -0
  322. setiastro/saspro/wavescale_hdr_preset.py +101 -0
  323. setiastro/saspro/wavescalede.py +658 -0
  324. setiastro/saspro/wavescalede_preset.py +230 -0
  325. setiastro/saspro/wcs_update.py +374 -0
  326. setiastro/saspro/whitebalance.py +456 -0
  327. setiastro/saspro/widgets/__init__.py +48 -0
  328. setiastro/saspro/widgets/common_utilities.py +306 -0
  329. setiastro/saspro/widgets/graphics_views.py +122 -0
  330. setiastro/saspro/widgets/image_utils.py +518 -0
  331. setiastro/saspro/widgets/preview_dialogs.py +280 -0
  332. setiastro/saspro/widgets/spinboxes.py +275 -0
  333. setiastro/saspro/widgets/themed_buttons.py +13 -0
  334. setiastro/saspro/widgets/wavelet_utils.py +299 -0
  335. setiastro/saspro/window_shelf.py +185 -0
  336. setiastro/saspro/xisf.py +1123 -0
  337. setiastrosuitepro-1.6.1.dist-info/METADATA +267 -0
  338. setiastrosuitepro-1.6.1.dist-info/RECORD +342 -0
  339. setiastrosuitepro-1.6.1.dist-info/WHEEL +4 -0
  340. setiastrosuitepro-1.6.1.dist-info/entry_points.txt +6 -0
  341. setiastrosuitepro-1.6.1.dist-info/licenses/LICENSE +674 -0
  342. setiastrosuitepro-1.6.1.dist-info/licenses/license.txt +2580 -0
setiastro/saspro/numba_utils.py
@@ -0,0 +1,3044 @@
+ import numpy as np
+ from numba import njit, prange
+ import cv2
+ import math
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_add_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 v = A[y,x,c] + B[y,x,c] * alpha
+                 # clamp 0..1
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_subtract_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 v = A[y,x,c] - B[y,x,c] * alpha
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_multiply_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 v = (A[y,x,c] * (1-alpha)) + (A[y,x,c] * B[y,x,c] * alpha)
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_divide_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     eps = 1e-6
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 # avoid division by zero
+                 b = A[y,x,c] / (B[y,x,c] + eps)
+                 # clamp f(A,B)
+                 if b < 0.0: b = 0.0
+                 elif b > 1.0: b = 1.0
+                 # mix with original
+                 v = A[y,x,c] * (1.0 - alpha) + b * alpha
+                 # clamp final
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_screen_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 # Screen: 1 - (1-A)*(1-B)
+                 b = 1.0 - (1.0 - A[y,x,c]) * (1.0 - B[y,x,c])
+                 if b < 0.0: b = 0.0
+                 elif b > 1.0: b = 1.0
+                 v = A[y,x,c] * (1.0 - alpha) + b * alpha
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_overlay_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 a = A[y,x,c]
+                 b_in = B[y,x,c]
+                 # Overlay: if a < .5: 2*a*b, else: 1 - 2*(1-a)*(1-b)
+                 if a <= 0.5:
+                     b = 2.0 * a * b_in
+                 else:
+                     b = 1.0 - 2.0 * (1.0 - a) * (1.0 - b_in)
+                 if b < 0.0: b = 0.0
+                 elif b > 1.0: b = 1.0
+                 v = a * (1.0 - alpha) + b * alpha
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def blend_difference_numba(A, B, alpha):
+     H, W, C = A.shape
+     out = np.empty_like(A)
+     for y in prange(H):
+         for x in range(W):
+             for c in range(C):
+                 # Difference: |A - B|
+                 b = A[y,x,c] - B[y,x,c]
+                 if b < 0.0: b = -b
+                 # clamp f(A,B) is redundant since abs() already >=0; we cap above 1
+                 if b > 1.0: b = 1.0
+                 v = A[y,x,c] * (1.0 - alpha) + b * alpha
+                 if v < 0.0: v = 0.0
+                 elif v > 1.0: v = 1.0
+                 out[y,x,c] = v
+     return out
+
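The blend kernels above all expect float32 color arrays of shape (H, W, C) with values in [0, 1] and clamp their output to that range; alpha scales B's contribution for add/subtract and cross-fades between A and the blended result for the other modes. A minimal usage sketch (illustrative only, with made-up arrays):

    import numpy as np
    A = np.random.rand(4, 4, 3).astype(np.float32)   # base image in [0, 1]
    B = np.random.rand(4, 4, 3).astype(np.float32)   # layer to blend in
    out = blend_screen_numba(A, B, 0.5)              # 50% screen, 50% original
    assert out.shape == A.shape and float(out.max()) <= 1.0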
+ @njit(parallel=True, fastmath=True, cache=True)
+ def rescale_image_numba(image, factor):
+     """
+     Custom rescale function using bilinear interpolation optimized with numba.
+     Supports both mono (2D) and color (3D) images.
+     """
+     if image.ndim == 2:
+         height, width = image.shape
+         new_width = int(width * factor)
+         new_height = int(height * factor)
+         output = np.zeros((new_height, new_width), dtype=np.float32)
+         for y in prange(new_height):
+             for x in prange(new_width):
+                 src_x = x / factor
+                 src_y = y / factor
+                 x0, y0 = int(src_x), int(src_y)
+                 x1 = x0 + 1 if x0 + 1 < width else width - 1
+                 y1 = y0 + 1 if y0 + 1 < height else height - 1
+                 dx = src_x - x0
+                 dy = src_y - y0
+                 output[y, x] = (image[y0, x0] * (1 - dx) * (1 - dy) +
+                                 image[y0, x1] * dx * (1 - dy) +
+                                 image[y1, x0] * (1 - dx) * dy +
+                                 image[y1, x1] * dx * dy)
+         return output
+     else:
+         height, width, channels = image.shape
+         new_width = int(width * factor)
+         new_height = int(height * factor)
+         output = np.zeros((new_height, new_width, channels), dtype=np.float32)
+         for y in prange(new_height):
+             for x in prange(new_width):
+                 src_x = x / factor
+                 src_y = y / factor
+                 x0, y0 = int(src_x), int(src_y)
+                 x1 = x0 + 1 if x0 + 1 < width else width - 1
+                 y1 = y0 + 1 if y0 + 1 < height else height - 1
+                 dx = src_x - x0
+                 dy = src_y - y0
+                 for c in range(channels):
+                     output[y, x, c] = (image[y0, x0, c] * (1 - dx) * (1 - dy) +
+                                        image[y0, x1, c] * dx * (1 - dy) +
+                                        image[y1, x0, c] * (1 - dx) * dy +
+                                        image[y1, x1, c] * dx * dy)
+         return output
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def bin2x2_numba(image):
+     """
+     Downsample the image by 2×2 via simple averaging (“integer binning”).
+     Works on 2D (H×W) or 3D (H×W×C) arrays. If dimensions aren’t even,
+     the last row/column is dropped.
+     """
+     h, w = image.shape[:2]
+     h2 = h // 2
+     w2 = w // 2
+
+     # allocate output
+     if image.ndim == 2:
+         out = np.empty((h2, w2), dtype=np.float32)
+         for i in prange(h2):
+             for j in prange(w2):
+                 # average 2x2 block
+                 s = image[2*i , 2*j ] \
+                     + image[2*i+1, 2*j ] \
+                     + image[2*i , 2*j+1] \
+                     + image[2*i+1, 2*j+1]
+                 out[i, j] = s * 0.25
+     else:
+         c = image.shape[2]
+         out = np.empty((h2, w2, c), dtype=np.float32)
+         for i in prange(h2):
+             for j in prange(w2):
+                 for k in range(c):
+                     s = image[2*i , 2*j , k] \
+                         + image[2*i+1, 2*j , k] \
+                         + image[2*i , 2*j+1, k] \
+                         + image[2*i+1, 2*j+1, k]
+                     out[i, j, k] = s * 0.25
+
+     return out
+
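rescale_image_numba resamples by an arbitrary factor using bilinear interpolation, while bin2x2_numba averages each 2×2 block (software binning), halving the resolution and roughly halving uncorrelated noise. A short round-trip sketch (illustrative only):

    img = np.random.rand(100, 120).astype(np.float32)
    binned = bin2x2_numba(img)                    # -> (50, 60), each pixel a 2x2 mean
    restored = rescale_image_numba(binned, 2.0)   # -> (100, 120) via bilinear upsampling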
+ @njit(parallel=True, fastmath=True, cache=True)
+ def flip_horizontal_numba(image):
+     """
+     Flips an image horizontally using Numba JIT.
+     Works with both mono (2D) and color (3D) images.
+     """
+     if image.ndim == 2:
+         height, width = image.shape
+         output = np.empty((height, width), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 output[y, x] = image[y, width - x - 1]
+         return output
+     else:
+         height, width, channels = image.shape
+         output = np.empty((height, width, channels), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 for c in range(channels):
+                     output[y, x, c] = image[y, width - x - 1, c]
+         return output
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def flip_vertical_numba(image):
+     """
+     Flips an image vertically using Numba JIT.
+     Works with both mono (2D) and color (3D) images.
+     """
+     if image.ndim == 2:
+         height, width = image.shape
+         output = np.empty((height, width), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 output[y, x] = image[height - y - 1, x]
+         return output
+     else:
+         height, width, channels = image.shape
+         output = np.empty((height, width, channels), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 for c in range(channels):
+                     output[y, x, c] = image[height - y - 1, x, c]
+         return output
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def rotate_90_clockwise_numba(image):
+     """
+     Rotates the image 90 degrees clockwise.
+     Works with both mono (2D) and color (3D) images.
+     """
+     if image.ndim == 2:
+         height, width = image.shape
+         output = np.empty((width, height), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 output[x, height - 1 - y] = image[y, x]
+         return output
+     else:
+         height, width, channels = image.shape
+         output = np.empty((width, height, channels), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 for c in range(channels):
+                     output[x, height - 1 - y, c] = image[y, x, c]
+         return output
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def rotate_90_counterclockwise_numba(image):
+     """
+     Rotates the image 90 degrees counterclockwise.
+     Works with both mono (2D) and color (3D) images.
+     """
+     if image.ndim == 2:
+         height, width = image.shape
+         output = np.empty((width, height), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 output[width - 1 - x, y] = image[y, x]
+         return output
+     else:
+         height, width, channels = image.shape
+         output = np.empty((width, height, channels), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 for c in range(channels):
+                     output[width - 1 - x, y, c] = image[y, x, c]
+         return output
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def invert_image_numba(image):
+     """
+     Inverts an image (1 - pixel value) using Numba JIT.
+     Works with both mono (2D) and color (3D) images.
+     """
+     if image.ndim == 2:
+         height, width = image.shape
+         output = np.empty((height, width), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 output[y, x] = 1.0 - image[y, x]
+         return output
+     else:
+         height, width, channels = image.shape
+         output = np.empty((height, width, channels), dtype=image.dtype)
+         for y in prange(height):
+             for x in prange(width):
+                 for c in range(channels):
+                     output[y, x, c] = 1.0 - image[y, x, c]
+         return output
+
+
+
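These geometry kernels follow the usual NumPy orientation conventions, which gives a quick way to sanity-check them on a small array (editor's sketch, not part of the package):

    m = np.arange(12, dtype=np.float32).reshape(3, 4)
    assert np.array_equal(flip_horizontal_numba(m), np.fliplr(m))
    assert np.array_equal(flip_vertical_numba(m), np.flipud(m))
    assert np.array_equal(rotate_90_clockwise_numba(m), np.rot90(m, k=-1))
    assert np.array_equal(rotate_90_counterclockwise_numba(m), np.rot90(m, k=1))
    assert np.allclose(invert_image_numba(m / 11.0), 1.0 - m / 11.0)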
+ @njit(parallel=True, fastmath=True, cache=True)
+ def apply_flat_division_numba_2d(image, master_flat, master_bias=None):
+     """
+     Mono version: image.shape == (H,W)
+     """
+     if master_bias is not None:
+         master_flat = master_flat - master_bias
+         image = image - master_bias
+
+     median_flat = np.mean(master_flat)
+     height, width = image.shape
+
+     for y in prange(height):
+         for x in range(width):
+             image[y, x] /= (master_flat[y, x] / median_flat)
+
+     return image
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def apply_flat_division_numba_3d(image, master_flat, master_bias=None):
+     """
+     Color version: image.shape == (H,W,C)
+     """
+     if master_bias is not None:
+         master_flat = master_flat - master_bias
+         image = image - master_bias
+
+     median_flat = np.mean(master_flat)
+     height, width, channels = image.shape
+
+     for y in prange(height):
+         for x in range(width):
+             for c in range(channels):
+                 image[y, x, c] /= (master_flat[y, x, c] / median_flat)
+
+     return image
+
+ def apply_flat_division_numba(image, master_flat, master_bias=None):
+     """
+     Dispatcher that calls the correct Numba function
+     depending on whether 'image' is 2D or 3D.
+     """
+     if image.ndim == 2:
+         # Mono
+         return apply_flat_division_numba_2d(image, master_flat, master_bias)
+     elif image.ndim == 3:
+         # Color
+         return apply_flat_division_numba_3d(image, master_flat, master_bias)
+     else:
+         raise ValueError(f"apply_flat_division_numba: expected 2D or 3D, got shape {image.shape}")
+
+
+ @njit(parallel=True, cache=True)
+ def subtract_dark_3d(frames, dark_frame):
+     """
+     For mono stack:
+       frames.shape == (F,H,W)
+       dark_frame.shape == (H,W)
+     Returns the same shape (F,H,W).
+     """
+     num_frames, height, width = frames.shape
+     result = np.empty_like(frames, dtype=np.float32)
+
+     for i in prange(num_frames):
+         # Subtract the dark frame from each 2D slice
+         result[i] = frames[i] - dark_frame
+
+     return result
+
+
+ @njit(parallel=True, cache=True)
+ def subtract_dark_4d(frames, dark_frame):
+     """
+     For color stack:
+       frames.shape == (F,H,W,C)
+       dark_frame.shape == (H,W,C)
+     Returns the same shape (F,H,W,C).
+     """
+     num_frames, height, width, channels = frames.shape
+     result = np.empty_like(frames, dtype=np.float32)
+
+     for i in prange(num_frames):
+         for y in range(height):
+             for x in range(width):
+                 for c in range(channels):
+                     result[i, y, x, c] = frames[i, y, x, c] - dark_frame[y, x, c]
+
+     return result
+
+ def subtract_dark(frames, dark_frame):
+     """
+     Dispatcher function that calls the correct Numba function
+     depending on whether 'frames' is 3D or 4D.
+     """
+     if frames.ndim == 3:
+         # frames: (F,H,W), dark_frame: (H,W)
+         return subtract_dark_3d(frames, dark_frame)
+     elif frames.ndim == 4:
+         # frames: (F,H,W,C), dark_frame: (H,W,C)
+         return subtract_dark_4d(frames, dark_frame)
+     else:
+         raise ValueError(f"subtract_dark: frames must be 3D or 4D, got {frames.shape}")
+
+
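Together, subtract_dark and apply_flat_division_numba implement the usual light-frame calibration: subtract the master dark, then divide by the master flat normalized to its mean (the variable is named median_flat but the code uses np.mean). Note that apply_flat_division_numba modifies and returns its input array. A sketch with synthetic float32 data (illustrative only; the suite itself loads these from master calibration files):

    F, H, W = 5, 100, 100
    lights = np.random.rand(F, H, W).astype(np.float32)
    dark   = np.full((H, W), 0.01, dtype=np.float32)
    flat   = np.random.uniform(0.9, 1.1, (H, W)).astype(np.float32)

    cal = subtract_dark(lights, dark)                      # (F, H, W), dark-subtracted
    for k in range(F):
        cal[k] = apply_flat_division_numba(cal[k], flat)   # in-place flat-field correction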
+ import numpy as np
+ from numba import njit, prange
+
+ # -------------------------------
+ # Windsorized Sigma Clipping (Weighted, Iterative)
+ # -------------------------------
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
+     """
+     Iterative Weighted Windsorized Sigma Clipping for a 3D mono stack.
+     stack.shape == (F,H,W)
+     weights.shape can be (F,) or (F,H,W).
+     Returns a tuple:
+       (clipped, rejection_mask)
+     where:
+       clipped is a 2D image (H,W),
+       rejection_mask is a boolean array of shape (F,H,W) with True indicating rejection.
+     """
+     num_frames, height, width = stack.shape
+     clipped = np.zeros((height, width), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
+
+     # Check weights shape
+     if weights.ndim == 1 and weights.shape[0] == num_frames:
+         pass
+     elif weights.ndim == 3 and weights.shape == stack.shape:
+         pass
+     else:
+         raise ValueError("windsorized_sigma_clip_weighted_3d_iter: mismatch in shapes for 3D stack & weights")
+
+     for i in prange(height):
+         for j in range(width):
+             pixel_values = stack[:, i, j]  # shape=(F,)
+             if weights.ndim == 1:
+                 pixel_weights = weights[:]  # shape (F,)
+             else:
+                 pixel_weights = weights[:, i, j]
+             # Start with nonzero pixels as valid
+             valid_mask = pixel_values != 0
+             for _ in range(iterations):
+                 if np.sum(valid_mask) == 0:
+                     break
+                 valid_vals = pixel_values[valid_mask]
+                 median_val = np.median(valid_vals)
+                 std_dev = np.std(valid_vals)
+                 lower_bound = median_val - lower * std_dev
+                 upper_bound = median_val + upper * std_dev
+                 valid_mask = valid_mask & (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
+             # Record rejections: a pixel is rejected if not valid.
+             for f in range(num_frames):
+                 rej_mask[f, i, j] = not valid_mask[f]
+             valid_vals = pixel_values[valid_mask]
+             valid_w = pixel_weights[valid_mask]
+             wsum = np.sum(valid_w)
+             if wsum > 0:
+                 clipped[i, j] = np.sum(valid_vals * valid_w) / wsum
+             else:
+                 nonzero = pixel_values[pixel_values != 0]
+                 if nonzero.size > 0:
+                     clipped[i, j] = np.median(nonzero)
+                 else:
+                     clipped[i, j] = 0.0
+     return clipped, rej_mask
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
+     """
+     Iterative Weighted Windsorized Sigma Clipping for a 4D color stack.
+     stack.shape == (F,H,W,C)
+     weights.shape can be (F,) or (F,H,W,C).
+     Returns a tuple:
+       (clipped, rejection_mask)
+     where:
+       clipped is a 3D image (H,W,C),
+       rejection_mask is a boolean array of shape (F,H,W,C).
+     """
+     num_frames, height, width, channels = stack.shape
+     clipped = np.zeros((height, width, channels), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
+
+     # Check weights shape
+     if weights.ndim == 1 and weights.shape[0] == num_frames:
+         pass
+     elif weights.ndim == 4 and weights.shape == stack.shape:
+         pass
+     else:
+         raise ValueError("windsorized_sigma_clip_weighted_4d_iter: mismatch in shapes for 4D stack & weights")
+
+     for i in prange(height):
+         for j in range(width):
+             for c in range(channels):
+                 pixel_values = stack[:, i, j, c]  # shape=(F,)
+                 if weights.ndim == 1:
+                     pixel_weights = weights[:]
+                 else:
+                     pixel_weights = weights[:, i, j, c]
+                 valid_mask = pixel_values != 0
+                 for _ in range(iterations):
+                     if np.sum(valid_mask) == 0:
+                         break
+                     valid_vals = pixel_values[valid_mask]
+                     median_val = np.median(valid_vals)
+                     std_dev = np.std(valid_vals)
+                     lower_bound = median_val - lower * std_dev
+                     upper_bound = median_val + upper * std_dev
+                     valid_mask = valid_mask & (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
+                 for f in range(num_frames):
+                     rej_mask[f, i, j, c] = not valid_mask[f]
+                 valid_vals = pixel_values[valid_mask]
+                 valid_w = pixel_weights[valid_mask]
+                 wsum = np.sum(valid_w)
+                 if wsum > 0:
+                     clipped[i, j, c] = np.sum(valid_vals * valid_w) / wsum
+                 else:
+                     nonzero = pixel_values[pixel_values != 0]
+                     if nonzero.size > 0:
+                         clipped[i, j, c] = np.median(nonzero)
+                     else:
+                         clipped[i, j, c] = 0.0
+     return clipped, rej_mask
+
+
+ def windsorized_sigma_clip_weighted(stack, weights, lower=2.5, upper=2.5, iterations=2):
+     """
+     Dispatcher that calls the appropriate iterative Numba function.
+     Now returns (clipped, rejection_mask).
+     """
+     if stack.ndim == 3:
+         return windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower, upper, iterations)
+     elif stack.ndim == 4:
+         return windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower, upper, iterations)
+     else:
+         raise ValueError(f"windsorized_sigma_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
+
+
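All of the stacking rejection routines in this file share one contract: given a stack shaped (F, H, W) or (F, H, W, C) and weights that are either per-frame (F,) or per-pixel (same shape as the stack), they return a tuple (stacked, rejection_mask), where the mask is True for every sample excluded from the weighted mean; zero-valued pixels are treated as missing data. A minimal sketch of the windsorized variant (illustrative only, synthetic data):

    F, H, W = 10, 32, 32
    stack = np.random.normal(0.5, 0.01, (F, H, W)).astype(np.float32)
    stack[3, 10, 10] = 5.0                    # simulate a hot pixel / satellite trail
    weights = np.ones(F, dtype=np.float32)    # equal per-frame weights

    stacked, rejected = windsorized_sigma_clip_weighted(stack, weights, lower=2.5, upper=2.5)
    # stacked.shape == (32, 32); rejected[3, 10, 10] is expected to be True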
+ # -------------------------------
+ # Kappa-Sigma Clipping (Weighted)
+ # -------------------------------
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def kappa_sigma_clip_weighted_3d(stack, weights, kappa=2.5, iterations=3):
+     """
+     Kappa-Sigma Clipping for a 3D mono stack.
+     stack.shape == (F,H,W)
+     Returns a tuple: (clipped, rejection_mask)
+     where rejection_mask is of shape (F,H,W) indicating per-frame rejections.
+     """
+     num_frames, height, width = stack.shape
+     clipped = np.empty((height, width), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
+
+     for i in prange(height):
+         for j in range(width):
+             pixel_values = stack[:, i, j].copy()
+             if weights.ndim == 1:
+                 pixel_weights = weights[:]
+             else:
+                 pixel_weights = weights[:, i, j].copy()
+             # Initialize tracking of indices
+             current_idx = np.empty(num_frames, dtype=np.int64)
+             for f in range(num_frames):
+                 current_idx[f] = f
+             current_vals = pixel_values
+             current_w = pixel_weights
+             current_indices = current_idx
+             med = 0.0
+             for _ in range(iterations):
+                 if current_vals.size == 0:
+                     break
+                 med = np.median(current_vals)
+                 std = np.std(current_vals)
+                 lower_bound = med - kappa * std
+                 upper_bound = med + kappa * std
+                 valid = (current_vals != 0) & (current_vals >= lower_bound) & (current_vals <= upper_bound)
+                 current_vals = current_vals[valid]
+                 current_w = current_w[valid]
+                 current_indices = current_indices[valid]
+             # Mark rejected: frames not in current_indices are rejected.
+             for f in range(num_frames):
+                 # Check if f is in current_indices
+                 found = False
+                 for k in range(current_indices.size):
+                     if current_indices[k] == f:
+                         found = True
+                         break
+                 if not found:
+                     rej_mask[f, i, j] = True
+                 else:
+                     rej_mask[f, i, j] = False
+             if current_w.size > 0 and current_w.sum() > 0:
+                 clipped[i, j] = np.sum(current_vals * current_w) / current_w.sum()
+             else:
+                 clipped[i, j] = med
+     return clipped, rej_mask
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def kappa_sigma_clip_weighted_4d(stack, weights, kappa=2.5, iterations=3):
+     """
+     Kappa-Sigma Clipping for a 4D color stack.
+     stack.shape == (F,H,W,C)
+     Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
+     """
+     num_frames, height, width, channels = stack.shape
+     clipped = np.empty((height, width, channels), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
+
+     for i in prange(height):
+         for j in range(width):
+             for c in range(channels):
+                 pixel_values = stack[:, i, j, c].copy()
+                 if weights.ndim == 1:
+                     pixel_weights = weights[:]
+                 else:
+                     pixel_weights = weights[:, i, j, c].copy()
+                 current_idx = np.empty(num_frames, dtype=np.int64)
+                 for f in range(num_frames):
+                     current_idx[f] = f
+                 current_vals = pixel_values
+                 current_w = pixel_weights
+                 current_indices = current_idx
+                 med = 0.0
+                 for _ in range(iterations):
+                     if current_vals.size == 0:
+                         break
+                     med = np.median(current_vals)
+                     std = np.std(current_vals)
+                     lower_bound = med - kappa * std
+                     upper_bound = med + kappa * std
+                     valid = (current_vals != 0) & (current_vals >= lower_bound) & (current_vals <= upper_bound)
+                     current_vals = current_vals[valid]
+                     current_w = current_w[valid]
+                     current_indices = current_indices[valid]
+                 for f in range(num_frames):
+                     found = False
+                     for k in range(current_indices.size):
+                         if current_indices[k] == f:
+                             found = True
+                             break
+                     if not found:
+                         rej_mask[f, i, j, c] = True
+                     else:
+                         rej_mask[f, i, j, c] = False
+                 if current_w.size > 0 and current_w.sum() > 0:
+                     clipped[i, j, c] = np.sum(current_vals * current_w) / current_w.sum()
+                 else:
+                     clipped[i, j, c] = med
+     return clipped, rej_mask
+
+
+ def kappa_sigma_clip_weighted(stack, weights, kappa=2.5, iterations=3):
+     """
+     Dispatcher that returns (clipped, rejection_mask) for kappa-sigma clipping.
+     """
+     if stack.ndim == 3:
+         return kappa_sigma_clip_weighted_3d(stack, weights, kappa, iterations)
+     elif stack.ndim == 4:
+         return kappa_sigma_clip_weighted_4d(stack, weights, kappa, iterations)
+     else:
+         raise ValueError(f"kappa_sigma_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
+
+
+ # -------------------------------
+ # Trimmed Mean (Weighted)
+ # -------------------------------
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def trimmed_mean_weighted_3d(stack, weights, trim_fraction=0.1):
+     """
+     Trimmed Mean for a 3D mono stack.
+     stack.shape == (F,H,W)
+     Returns (clipped, rejection_mask) where rejection_mask (F,H,W) flags frames that were trimmed.
+     """
+     num_frames, height, width = stack.shape
+     clipped = np.empty((height, width), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
+
+     for i in prange(height):
+         for j in range(width):
+             pix_all = stack[:, i, j]
+             if weights.ndim == 1:
+                 w_all = weights[:]
+             else:
+                 w_all = weights[:, i, j]
+             # Exclude zeros and record original indices.
+             valid = pix_all != 0
+             pix = pix_all[valid]
+             w = w_all[valid]
+             orig_idx = np.empty(pix_all.shape[0], dtype=np.int64)
+             count = 0
+             for f in range(num_frames):
+                 if valid[f]:
+                     orig_idx[count] = f
+                     count += 1
+             n = pix.size
+             if n == 0:
+                 clipped[i, j] = 0.0
+                 # Mark all as rejected.
+                 for f in range(num_frames):
+                     if not valid[f]:
+                         rej_mask[f, i, j] = True
+                 continue
+             trim = int(trim_fraction * n)
+             order = np.argsort(pix)
+             # Determine which indices (in the valid list) are kept.
+             if n > 2 * trim:
+                 keep_order = order[trim:n - trim]
+             else:
+                 keep_order = order
+             # Build a mask for the valid pixels (length n) that are kept.
+             keep_mask = np.zeros(n, dtype=np.bool_)
+             for k in range(keep_order.size):
+                 keep_mask[keep_order[k]] = True
+             # Map back to original frame indices.
+             for idx in range(n):
+                 frame = orig_idx[idx]
+                 if not keep_mask[idx]:
+                     rej_mask[frame, i, j] = True
+                 else:
+                     rej_mask[frame, i, j] = False
+             # Compute weighted average of kept values.
+             sorted_pix = pix[order]
+             sorted_w = w[order]
+             if n > 2 * trim:
+                 trimmed_values = sorted_pix[trim:n - trim]
+                 trimmed_weights = sorted_w[trim:n - trim]
+             else:
+                 trimmed_values = sorted_pix
+                 trimmed_weights = sorted_w
+             wsum = trimmed_weights.sum()
+             if wsum > 0:
+                 clipped[i, j] = np.sum(trimmed_values * trimmed_weights) / wsum
+             else:
+                 clipped[i, j] = np.median(trimmed_values)
+     return clipped, rej_mask
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def trimmed_mean_weighted_4d(stack, weights, trim_fraction=0.1):
+     """
+     Trimmed Mean for a 4D color stack.
+     stack.shape == (F,H,W,C)
+     Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
+     """
+     num_frames, height, width, channels = stack.shape
+     clipped = np.empty((height, width, channels), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
+
+     for i in prange(height):
+         for j in range(width):
+             for c in range(channels):
+                 pix_all = stack[:, i, j, c]
+                 if weights.ndim == 1:
+                     w_all = weights[:]
+                 else:
+                     w_all = weights[:, i, j, c]
+                 valid = pix_all != 0
+                 pix = pix_all[valid]
+                 w = w_all[valid]
+                 orig_idx = np.empty(pix_all.shape[0], dtype=np.int64)
+                 count = 0
+                 for f in range(num_frames):
+                     if valid[f]:
+                         orig_idx[count] = f
+                         count += 1
+                 n = pix.size
+                 if n == 0:
+                     clipped[i, j, c] = 0.0
+                     for f in range(num_frames):
+                         if not valid[f]:
+                             rej_mask[f, i, j, c] = True
+                     continue
+                 trim = int(trim_fraction * n)
+                 order = np.argsort(pix)
+                 if n > 2 * trim:
+                     keep_order = order[trim:n - trim]
+                 else:
+                     keep_order = order
+                 keep_mask = np.zeros(n, dtype=np.bool_)
+                 for k in range(keep_order.size):
+                     keep_mask[keep_order[k]] = True
+                 for idx in range(n):
+                     frame = orig_idx[idx]
+                     if not keep_mask[idx]:
+                         rej_mask[frame, i, j, c] = True
+                     else:
+                         rej_mask[frame, i, j, c] = False
+                 sorted_pix = pix[order]
+                 sorted_w = w[order]
+                 if n > 2 * trim:
+                     trimmed_values = sorted_pix[trim:n - trim]
+                     trimmed_weights = sorted_w[trim:n - trim]
+                 else:
+                     trimmed_values = sorted_pix
+                     trimmed_weights = sorted_w
+                 wsum = trimmed_weights.sum()
+                 if wsum > 0:
+                     clipped[i, j, c] = np.sum(trimmed_values * trimmed_weights) / wsum
+                 else:
+                     clipped[i, j, c] = np.median(trimmed_values)
+     return clipped, rej_mask
+
+
+ def trimmed_mean_weighted(stack, weights, trim_fraction=0.1):
+     """
+     Dispatcher that returns (clipped, rejection_mask) for trimmed mean.
+     """
+     if stack.ndim == 3:
+         return trimmed_mean_weighted_3d(stack, weights, trim_fraction)
+     elif stack.ndim == 4:
+         return trimmed_mean_weighted_4d(stack, weights, trim_fraction)
+     else:
+         raise ValueError(f"trimmed_mean_weighted: stack must be 3D or 4D, got {stack.shape}")
+
+
+ # -------------------------------
+ # Extreme Studentized Deviate (ESD) Clipping (Weighted)
+ # -------------------------------
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def esd_clip_weighted_3d(stack, weights, threshold=3.0):
+     """
+     ESD Clipping for a 3D mono stack.
+     stack.shape == (F,H,W)
+     Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W).
+     """
+     num_frames, height, width = stack.shape
+     clipped = np.empty((height, width), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
+
+     if weights.ndim == 1 and weights.shape[0] == num_frames:
+         pass
+     elif weights.ndim == 3 and weights.shape == stack.shape:
+         pass
+     else:
+         raise ValueError("esd_clip_weighted_3d: mismatch in shapes for 3D stack & weights")
+
+     for i in prange(height):
+         for j in range(width):
+             pix = stack[:, i, j]
+             if weights.ndim == 1:
+                 w = weights[:]
+             else:
+                 w = weights[:, i, j]
+             valid = pix != 0
+             values = pix[valid]
+             wvals = w[valid]
+             if values.size == 0:
+                 clipped[i, j] = 0.0
+                 for f in range(num_frames):
+                     if not valid[f]:
+                         rej_mask[f, i, j] = True
+                 continue
+             mean_val = np.mean(values)
+             std_val = np.std(values)
+             if std_val == 0:
+                 clipped[i, j] = mean_val
+                 for f in range(num_frames):
+                     rej_mask[f, i, j] = False
+                 continue
+             z_scores = np.abs((values - mean_val) / std_val)
+             valid2 = z_scores < threshold
+             # Mark rejected: for the valid entries, use valid2.
+             idx = 0
+             for f in range(num_frames):
+                 if valid[f]:
+                     if not valid2[idx]:
+                         rej_mask[f, i, j] = True
+                     else:
+                         rej_mask[f, i, j] = False
+                     idx += 1
+                 else:
+                     rej_mask[f, i, j] = True
+             values = values[valid2]
+             wvals = wvals[valid2]
+             wsum = wvals.sum()
+             if wsum > 0:
+                 clipped[i, j] = np.sum(values * wvals) / wsum
+             else:
+                 clipped[i, j] = mean_val
+     return clipped, rej_mask
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def esd_clip_weighted_4d(stack, weights, threshold=3.0):
+     """
+     ESD Clipping for a 4D color stack.
+     stack.shape == (F,H,W,C)
+     Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
+     """
+     num_frames, height, width, channels = stack.shape
+     clipped = np.empty((height, width, channels), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
+
+     if weights.ndim == 1 and weights.shape[0] == num_frames:
+         pass
+     elif weights.ndim == 4 and weights.shape == stack.shape:
+         pass
+     else:
+         raise ValueError("esd_clip_weighted_4d: mismatch in shapes for 4D stack & weights")
+
+     for i in prange(height):
+         for j in range(width):
+             for c in range(channels):
+                 pix = stack[:, i, j, c]
+                 if weights.ndim == 1:
+                     w = weights[:]
+                 else:
+                     w = weights[:, i, j, c]
+                 valid = pix != 0
+                 values = pix[valid]
+                 wvals = w[valid]
+                 if values.size == 0:
+                     clipped[i, j, c] = 0.0
+                     for f in range(num_frames):
+                         if not valid[f]:
+                             rej_mask[f, i, j, c] = True
+                     continue
+                 mean_val = np.mean(values)
+                 std_val = np.std(values)
+                 if std_val == 0:
+                     clipped[i, j, c] = mean_val
+                     for f in range(num_frames):
+                         rej_mask[f, i, j, c] = False
+                     continue
+                 z_scores = np.abs((values - mean_val) / std_val)
+                 valid2 = z_scores < threshold
+                 idx = 0
+                 for f in range(num_frames):
+                     if valid[f]:
+                         if not valid2[idx]:
+                             rej_mask[f, i, j, c] = True
+                         else:
+                             rej_mask[f, i, j, c] = False
+                         idx += 1
+                     else:
+                         rej_mask[f, i, j, c] = True
+                 values = values[valid2]
+                 wvals = wvals[valid2]
+                 wsum = wvals.sum()
+                 if wsum > 0:
+                     clipped[i, j, c] = np.sum(values * wvals) / wsum
+                 else:
+                     clipped[i, j, c] = mean_val
+     return clipped, rej_mask
+
+
+ def esd_clip_weighted(stack, weights, threshold=3.0):
+     """
+     Dispatcher that returns (clipped, rejection_mask) for ESD clipping.
+     """
+     if stack.ndim == 3:
+         return esd_clip_weighted_3d(stack, weights, threshold)
+     elif stack.ndim == 4:
+         return esd_clip_weighted_4d(stack, weights, threshold)
+     else:
+         raise ValueError(f"esd_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
+
+
+ # -------------------------------
+ # Biweight Location (Weighted)
+ # -------------------------------
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def biweight_location_weighted_3d(stack, weights, tuning_constant=6.0):
+     """
+     Biweight Location for a 3D mono stack.
+     stack.shape == (F,H,W)
+     Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W).
+     """
+     num_frames, height, width = stack.shape
+     clipped = np.empty((height, width), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
+
+     if weights.ndim == 1 and weights.shape[0] == num_frames:
+         pass
+     elif weights.ndim == 3 and weights.shape == stack.shape:
+         pass
+     else:
+         raise ValueError("biweight_location_weighted_3d: mismatch in shapes for 3D stack & weights")
+
+     for i in prange(height):
+         for j in range(width):
+             x = stack[:, i, j]
+             if weights.ndim == 1:
+                 w = weights[:]
+             else:
+                 w = weights[:, i, j]
+             valid = x != 0
+             x_valid = x[valid]
+             w_valid = w[valid]
+             # Record rejections for zeros:
+             for f in range(num_frames):
+                 if not valid[f]:
+                     rej_mask[f, i, j] = True
+                 else:
+                     rej_mask[f, i, j] = False  # initialize as accepted; may update below
+             n = x_valid.size
+             if n == 0:
+                 clipped[i, j] = 0.0
+                 continue
+             M = np.median(x_valid)
+             mad = np.median(np.abs(x_valid - M))
+             if mad == 0:
+                 clipped[i, j] = M
+                 continue
+             u = (x_valid - M) / (tuning_constant * mad)
+             mask = np.abs(u) < 1
+             # Mark frames that were excluded by the biweight rejection:
+             idx = 0
+             for f in range(num_frames):
+                 if valid[f]:
+                     if not mask[idx]:
+                         rej_mask[f, i, j] = True
+                     idx += 1
+             x_masked = x_valid[mask]
+             w_masked = w_valid[mask]
+             numerator = ((x_masked - M) * (1 - u[mask]**2)**2 * w_masked).sum()
+             denominator = ((1 - u[mask]**2)**2 * w_masked).sum()
+             if denominator != 0:
+                 biweight = M + numerator / denominator
+             else:
+                 biweight = M
+             clipped[i, j] = biweight
+     return clipped, rej_mask
+
+
+ @njit(parallel=True, fastmath=True, cache=True)
+ def biweight_location_weighted_4d(stack, weights, tuning_constant=6.0):
+     """
+     Biweight Location for a 4D color stack.
+     stack.shape == (F,H,W,C)
+     Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
+     """
+     num_frames, height, width, channels = stack.shape
+     clipped = np.empty((height, width, channels), dtype=np.float32)
+     rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
+
+     if weights.ndim == 1 and weights.shape[0] == num_frames:
+         pass
+     elif weights.ndim == 4 and weights.shape == stack.shape:
+         pass
+     else:
+         raise ValueError("biweight_location_weighted_4d: mismatch in shapes for 4D stack & weights")
+
+     for i in prange(height):
+         for j in range(width):
+             for c in range(channels):
+                 x = stack[:, i, j, c]
+                 if weights.ndim == 1:
+                     w = weights[:]
+                 else:
+                     w = weights[:, i, j, c]
+                 valid = x != 0
+                 x_valid = x[valid]
+                 w_valid = w[valid]
+                 for f in range(num_frames):
+                     if not valid[f]:
+                         rej_mask[f, i, j, c] = True
+                     else:
+                         rej_mask[f, i, j, c] = False
+                 n = x_valid.size
+                 if n == 0:
+                     clipped[i, j, c] = 0.0
+                     continue
+                 M = np.median(x_valid)
+                 mad = np.median(np.abs(x_valid - M))
+                 if mad == 0:
+                     clipped[i, j, c] = M
+                     continue
+                 u = (x_valid - M) / (tuning_constant * mad)
+                 mask = np.abs(u) < 1
+                 idx = 0
+                 for f in range(num_frames):
+                     if valid[f]:
+                         if not mask[idx]:
+                             rej_mask[f, i, j, c] = True
+                         idx += 1
+                 x_masked = x_valid[mask]
+                 w_masked = w_valid[mask]
+                 numerator = ((x_masked - M) * (1 - u[mask]**2)**2 * w_masked).sum()
+                 denominator = ((1 - u[mask]**2)**2 * w_masked).sum()
+                 if denominator != 0:
+                     biweight = M + numerator / denominator
+                 else:
+                     biweight = M
+                 clipped[i, j, c] = biweight
+     return clipped, rej_mask
+
+
+ def biweight_location_weighted(stack, weights, tuning_constant=6.0):
+     """
+     Dispatcher that returns (clipped, rejection_mask) for biweight location.
+     """
+     if stack.ndim == 3:
+         return biweight_location_weighted_3d(stack, weights, tuning_constant)
+     elif stack.ndim == 4:
+         return biweight_location_weighted_4d(stack, weights, tuning_constant)
+     else:
+         raise ValueError(f"biweight_location_weighted: stack must be 3D or 4D, got {stack.shape}")
+
+
+ # -------------------------------
1132
+ # Modified Z-Score Clipping (Weighted)
1133
+ # -------------------------------
1134
+
1135
+ @njit(parallel=True, fastmath=True, cache=True)
1136
+ def modified_zscore_clip_weighted_3d(stack, weights, threshold=3.5):
1137
+ """
1138
+ Modified Z-Score Clipping for a 3D mono stack.
1139
+ stack.shape == (F,H,W)
1140
+ Returns (clipped, rejection_mask) with rejection_mask shape (F,H,W).
1141
+ """
1142
+ num_frames, height, width = stack.shape
1143
+ clipped = np.empty((height, width), dtype=np.float32)
1144
+ rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
1145
+
1146
+ if weights.ndim == 1 and weights.shape[0] == num_frames:
1147
+ pass
1148
+ elif weights.ndim == 3 and weights.shape == stack.shape:
1149
+ pass
1150
+ else:
1151
+ raise ValueError("modified_zscore_clip_weighted_3d: mismatch in shapes for 3D stack & weights")
1152
+
1153
+ for i in prange(height):
1154
+ for j in range(width):
1155
+ x = stack[:, i, j]
1156
+ if weights.ndim == 1:
1157
+ w = weights[:]
1158
+ else:
1159
+ w = weights[:, i, j]
1160
+ valid = x != 0
1161
+ x_valid = x[valid]
1162
+ w_valid = w[valid]
1163
+ if x_valid.size == 0:
1164
+ clipped[i, j] = 0.0
1165
+ for f in range(num_frames):
1166
+ if not valid[f]:
1167
+ rej_mask[f, i, j] = True
1168
+ continue
1169
+ median_val = np.median(x_valid)
1170
+ mad = np.median(np.abs(x_valid - median_val))
1171
+ if mad == 0:
1172
+ clipped[i, j] = median_val
1173
+ for f in range(num_frames):
1174
+ rej_mask[f, i, j] = False
1175
+ continue
1176
+ modified_z = 0.6745 * (x_valid - median_val) / mad
1177
+ valid2 = np.abs(modified_z) < threshold
1178
+ idx = 0
1179
+ for f in range(num_frames):
1180
+ if valid[f]:
1181
+ if not valid2[idx]:
1182
+ rej_mask[f, i, j] = True
1183
+ else:
1184
+ rej_mask[f, i, j] = False
1185
+ idx += 1
1186
+ else:
1187
+ rej_mask[f, i, j] = True
1188
+ x_final = x_valid[valid2]
1189
+ w_final = w_valid[valid2]
1190
+ wsum = w_final.sum()
1191
+ if wsum > 0:
1192
+ clipped[i, j] = np.sum(x_final * w_final) / wsum
1193
+ else:
1194
+ clipped[i, j] = median_val
1195
+ return clipped, rej_mask
1196
+
1197
+
1198
+ @njit(parallel=True, fastmath=True, cache=True)
1199
+ def modified_zscore_clip_weighted_4d(stack, weights, threshold=3.5):
1200
+ """
1201
+ Modified Z-Score Clipping for a 4D color stack.
1202
+ stack.shape == (F,H,W,C)
1203
+ Returns (clipped, rejection_mask) with rejection_mask shape (F,H,W,C).
1204
+ """
1205
+ num_frames, height, width, channels = stack.shape
1206
+ clipped = np.empty((height, width, channels), dtype=np.float32)
1207
+ rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
1208
+
1209
+ if weights.ndim == 1 and weights.shape[0] == num_frames:
1210
+ pass
1211
+ elif weights.ndim == 4 and weights.shape == stack.shape:
1212
+ pass
1213
+ else:
1214
+ raise ValueError("modified_zscore_clip_weighted_4d: mismatch in shapes for 4D stack & weights")
1215
+
1216
+ for i in prange(height):
1217
+ for j in range(width):
1218
+ for c in range(channels):
1219
+ x = stack[:, i, j, c]
1220
+ if weights.ndim == 1:
1221
+ w = weights[:]
1222
+ else:
1223
+ w = weights[:, i, j, c]
1224
+ valid = x != 0
1225
+ x_valid = x[valid]
1226
+ w_valid = w[valid]
1227
+ if x_valid.size == 0:
1228
+ clipped[i, j, c] = 0.0
1229
+ for f in range(num_frames):
1230
+ if not valid[f]:
1231
+ rej_mask[f, i, j, c] = True
1232
+ continue
1233
+ median_val = np.median(x_valid)
1234
+ mad = np.median(np.abs(x_valid - median_val))
1235
+ if mad == 0:
1236
+ clipped[i, j, c] = median_val
1237
+ for f in range(num_frames):
1238
+ rej_mask[f, i, j, c] = False
1239
+ continue
1240
+ modified_z = 0.6745 * (x_valid - median_val) / mad
1241
+ valid2 = np.abs(modified_z) < threshold
1242
+ idx = 0
1243
+ for f in range(num_frames):
1244
+ if valid[f]:
1245
+ if not valid2[idx]:
1246
+ rej_mask[f, i, j, c] = True
1247
+ else:
1248
+ rej_mask[f, i, j, c] = False
1249
+ idx += 1
1250
+ else:
1251
+ rej_mask[f, i, j, c] = True
1252
+ x_final = x_valid[valid2]
1253
+ w_final = w_valid[valid2]
1254
+ wsum = w_final.sum()
1255
+ if wsum > 0:
1256
+ clipped[i, j, c] = np.sum(x_final * w_final) / wsum
1257
+ else:
1258
+ clipped[i, j, c] = median_val
1259
+ return clipped, rej_mask
1260
+
1261
+
1262
+ def modified_zscore_clip_weighted(stack, weights, threshold=3.5):
1263
+ """
1264
+ Dispatcher that returns (clipped, rejection_mask) for modified z-score clipping.
1265
+ """
1266
+ if stack.ndim == 3:
1267
+ return modified_zscore_clip_weighted_3d(stack, weights, threshold)
1268
+ elif stack.ndim == 4:
1269
+ return modified_zscore_clip_weighted_4d(stack, weights, threshold)
1270
+ else:
1271
+ raise ValueError(f"modified_zscore_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
1272
+
1273
+
1274
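The rejection rule above is the modified z-score, 0.6745 * (x - median) / MAD, with frames discarded when its magnitude exceeds the threshold (3.5 by default). A small worked example, independent of the Numba kernels:

import numpy as np

x = np.array([10.1, 10.3, 9.9, 10.2, 25.0])   # five frame values, one wild outlier
med = np.median(x)                             # 10.2
mad = np.median(np.abs(x - med))               # 0.1
mz = 0.6745 * (x - med) / mad
print(mz)                                      # the 25.0 entry scores ~100, far past 3.5; the rest stay near or below ~2 in magnitude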
+ # -------------------------------
1275
+     # Winsorized Sigma Clipping (Non-weighted)
1276
+ # -------------------------------
1277
+
1278
+ @njit(parallel=True, fastmath=True, cache=True)
1279
+ def windsorized_sigma_clip_3d(stack, lower=2.5, upper=2.5):
1280
+ """
1281
+     Winsorized Sigma Clipping for a 3D mono stack (non-weighted).
1282
+ stack.shape == (F,H,W)
1283
+ Returns (clipped, rejection_mask) where rejection_mask is (F,H,W).
1284
+ """
1285
+ num_frames, height, width = stack.shape
1286
+ clipped = np.zeros((height, width), dtype=np.float32)
1287
+ rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
1288
+
1289
+ for i in prange(height):
1290
+ for j in range(width):
1291
+ pixel_values = stack[:, i, j]
1292
+ median_val = np.median(pixel_values)
1293
+ std_dev = np.std(pixel_values)
1294
+ lower_bound = median_val - lower * std_dev
1295
+ upper_bound = median_val + upper * std_dev
1296
+ valid = (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
1297
+ for f in range(num_frames):
1298
+ rej_mask[f, i, j] = not valid[f]
1299
+ valid_vals = pixel_values[valid]
1300
+ if valid_vals.size > 0:
1301
+ clipped[i, j] = np.mean(valid_vals)
1302
+ else:
1303
+ clipped[i, j] = median_val
1304
+ return clipped, rej_mask
1305
+
1306
+
1307
+ @njit(parallel=True, fastmath=True, cache=True)
1308
+ def windsorized_sigma_clip_4d(stack, lower=2.5, upper=2.5):
1309
+ """
1310
+     Winsorized Sigma Clipping for a 4D color stack (non-weighted).
1311
+ stack.shape == (F,H,W,C)
1312
+ Returns (clipped, rejection_mask) where rejection_mask is (F,H,W,C).
1313
+ """
1314
+ num_frames, height, width, channels = stack.shape
1315
+ clipped = np.zeros((height, width, channels), dtype=np.float32)
1316
+ rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
1317
+
1318
+ for i in prange(height):
1319
+ for j in range(width):
1320
+ for c in range(channels):
1321
+ pixel_values = stack[:, i, j, c]
1322
+ median_val = np.median(pixel_values)
1323
+ std_dev = np.std(pixel_values)
1324
+ lower_bound = median_val - lower * std_dev
1325
+ upper_bound = median_val + upper * std_dev
1326
+ valid = (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
1327
+ for f in range(num_frames):
1328
+ rej_mask[f, i, j, c] = not valid[f]
1329
+ valid_vals = pixel_values[valid]
1330
+ if valid_vals.size > 0:
1331
+ clipped[i, j, c] = np.mean(valid_vals)
1332
+ else:
1333
+ clipped[i, j, c] = median_val
1334
+ return clipped, rej_mask
1335
+
1336
+
1337
+ def windsorized_sigma_clip(stack, lower=2.5, upper=2.5):
1338
+ """
1339
+ Dispatcher function that calls either the 3D or 4D specialized Numba function,
1340
+ depending on 'stack.ndim'.
1341
+ """
1342
+ if stack.ndim == 3:
1343
+ return windsorized_sigma_clip_3d(stack, lower, upper)
1344
+ elif stack.ndim == 4:
1345
+ return windsorized_sigma_clip_4d(stack, lower, upper)
1346
+ else:
1347
+ raise ValueError(f"windsorized_sigma_clip: stack must be 3D or 4D, got {stack.shape}")
1348
+
1349
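A minimal usage sketch for the dispatcher above on a synthetic mono stack with one injected outlier; it assumes numpy is imported and the functions above are in scope:

import numpy as np

rng = np.random.default_rng(2)
stack = rng.normal(0.1, 0.01, size=(10, 32, 32)).astype(np.float32)
stack[3, 5, 5] = 0.9                               # simulated cosmic-ray / satellite hit

result, rejected = windsorized_sigma_clip(stack, lower=2.5, upper=2.5)
print(bool(rejected[3, 5, 5]))                     # True: the hit lies outside median +/- 2.5 * sigma
print(float(result[5, 5]))                         # close to 0.1, the mean of the surviving frames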
+ def max_value_stack(stack, weights=None):
1350
+ """
1351
+ Stacking by taking the maximum value along the frame axis.
1352
+ Returns (clipped, rejection_mask) for compatibility:
1353
+ - clipped: H×W (or H×W×C)
1354
+ - rejection_mask: same shape as stack, all False
1355
+ """
1356
+ clipped = np.max(stack, axis=0)
1357
+ rej_mask = np.zeros(stack.shape, dtype=bool)
1358
+ return clipped, rej_mask
1359
+
1360
+ @njit(parallel=True, cache=True)
1361
+ def subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal):
1362
+ """
1363
+ For mono stack:
1364
+ frames.shape == (F,H,W)
1365
+ dark_frame.shape == (H,W)
1366
+ Adds 'pedestal' after subtracting dark_frame from each frame.
1367
+ Returns the same shape (F,H,W).
1368
+ """
1369
+ num_frames, height, width = frames.shape
1370
+ result = np.empty_like(frames, dtype=np.float32)
1371
+
1372
+ # Validate dark_frame shape
1373
+ if dark_frame.ndim != 2 or dark_frame.shape != (height, width):
1374
+ raise ValueError(
1375
+ "subtract_dark_with_pedestal_3d: for 3D frames, dark_frame must be 2D (H,W)"
1376
+ )
1377
+
1378
+ for i in prange(num_frames):
1379
+ for y in range(height):
1380
+ for x in range(width):
1381
+ result[i, y, x] = frames[i, y, x] - dark_frame[y, x] + pedestal
1382
+
1383
+ return result
1384
+
1385
+ @njit(parallel=True, cache=True)
1386
+ def subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal):
1387
+ """
1388
+ For color stack:
1389
+ frames.shape == (F,H,W,C)
1390
+ dark_frame.shape == (H,W,C)
1391
+ Adds 'pedestal' after subtracting dark_frame from each frame.
1392
+ Returns the same shape (F,H,W,C).
1393
+ """
1394
+ num_frames, height, width, channels = frames.shape
1395
+ result = np.empty_like(frames, dtype=np.float32)
1396
+
1397
+ # Validate dark_frame shape
1398
+ if dark_frame.ndim != 3 or dark_frame.shape != (height, width, channels):
1399
+ raise ValueError(
1400
+ "subtract_dark_with_pedestal_4d: for 4D frames, dark_frame must be 3D (H,W,C)"
1401
+ )
1402
+
1403
+ for i in prange(num_frames):
1404
+ for y in range(height):
1405
+ for x in range(width):
1406
+ for c in range(channels):
1407
+ result[i, y, x, c] = frames[i, y, x, c] - dark_frame[y, x, c] + pedestal
1408
+
1409
+ return result
1410
+
1411
+ def subtract_dark_with_pedestal(frames, dark_frame, pedestal):
1412
+ """
1413
+ Dispatcher function that calls either the 3D or 4D specialized Numba function
1414
+ depending on 'frames.ndim'.
1415
+ """
1416
+ if frames.ndim == 3:
1417
+ return subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal)
1418
+ elif frames.ndim == 4:
1419
+ return subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal)
1420
+ else:
1421
+ raise ValueError(
1422
+ f"subtract_dark_with_pedestal: frames must be 3D or 4D, got {frames.shape}"
1423
+ )
1424
+
1425
+
1426
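A short sketch of the dark subtraction above on synthetic data; the pedestal is added back so pixels that come out darker than the master dark are not clipped at zero. The array sizes and values are only illustrative:

import numpy as np

rng = np.random.default_rng(0)
frames = rng.normal(0.05, 0.005, size=(4, 32, 32)).astype(np.float32)
dark = np.full((32, 32), 0.04, dtype=np.float32)

calibrated = subtract_dark_with_pedestal(frames, dark, 0.01)
print(float(calibrated.mean()))   # about 0.05 - 0.04 + 0.01 = 0.02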
+ @njit(parallel=True, fastmath=True, cache=True)
1427
+ def parallel_measure_frames(images):
1428
+ """
1429
+ Parallel processing for measuring simple stats (mean only).
1430
+ 'images' is a list (or array) of N images, each of which can be:
1431
+ - 2D (H,W) for a single mono image
1432
+ - 3D (H,W,C) for a single color image
1433
+ - Possibly 3D or 4D if you're storing multi-frame stacks in 'images'
1434
+ We just compute np.mean(...) of each image, no matter how many dims.
1435
+ """
1436
+ n = len(images)
1437
+ means = np.zeros(n, dtype=np.float32)
1438
+
1439
+ for i in prange(n):
1440
+ arr = images[i]
1441
+ # arr could have shape (H,W) or (H,W,C) or (F,H,W) etc.
1442
+ # np.mean works for any dimension, so no special logic needed.
1443
+ means[i] = np.float32(np.mean(arr))
1444
+
1445
+ return means
1446
+
1447
+
1448
+ @njit(fastmath=True, cache=True)
1449
+ def fast_mad(image):
1450
+ """ Computes the Median Absolute Deviation (MAD) as a robust noise estimator. """
1451
+ flat_image = image.ravel() # ✅ Flatten the 2D array into 1D
1452
+ median_val = np.median(flat_image) # Compute median
1453
+ mad = np.median(np.abs(flat_image - median_val)) # Compute MAD
1454
+ return mad * 1.4826 # ✅ Scale MAD to match standard deviation (for Gaussian noise)
1455
+
1456
+
1457
+
1458
+ @njit(fastmath=True, cache=True)
1459
+ def compute_snr(image):
1460
+ """ Computes the Signal-to-Noise Ratio (SNR) using fast Numba std. """
1461
+ mean_signal = np.mean(image)
1462
+ noise = compute_noise(image)
1463
+ return mean_signal / noise if noise > 0 else 0
1464
+
1465
+
1466
+
1467
+
1468
+ @njit(fastmath=True, cache=True)
1469
+ def compute_noise(image):
1470
+ """ Estimates noise using Median Absolute Deviation (MAD). """
1471
+ return fast_mad(image)
1472
+
1473
+
1474
+
1475
+
1476
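The 1.4826 factor makes the MAD a consistent estimator of the standard deviation for Gaussian noise, so compute_snr is effectively the mean divided by a robust sigma. A quick check on synthetic noise, assuming the functions above are in scope:

import numpy as np

rng = np.random.default_rng(0)
noise = rng.normal(0.0, 0.05, size=(256, 256)).astype(np.float32)

print(float(fast_mad(noise)))            # ~0.05: MAD * 1.4826 recovers sigma for Gaussian noise
print(float(compute_snr(noise + 0.5)))   # ~10: mean level 0.5 divided by the robust noise estimate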
+ def compute_star_count(image):
1477
+ """ Uses fast star detection instead of DAOStarFinder. """
1478
+ return fast_star_count(image)
1479
+
1480
+
1481
+ def fast_star_count(
1482
+ image,
1483
+ blur_size=15, # Smaller blur preserves faint/small stars
1484
+ threshold_factor=0.8,
1485
+ min_area=2,
1486
+ max_area=5000
1487
+ ):
1488
+ """
1489
+ Estimate star count + average eccentricity by:
1490
+ 1) Convert to 8-bit grayscale
1491
+ 2) Blur => subtract => enhance stars
1492
+ 3) Otsu's threshold * threshold_factor => final threshold
1493
+ 4) Contour detection + ellipse fit => eccentricity
1494
+ Returns (star_count, avg_ecc).
1495
+ """
1496
+
1497
+ # 1) Convert to grayscale if needed
1498
+ if image.ndim == 3:
1499
+ image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
1500
+
1501
+ # 2) Normalize to 8-bit
1502
+ img_min, img_max = image.min(), image.max()
1503
+ if img_max > img_min:
1504
+ image_8u = (255.0 * (image - img_min) / (img_max - img_min)).astype(np.uint8)
1505
+ else:
1506
+ return 0, 0.0 # All pixels identical => no stars
1507
+
1508
+ # 3) Blur + subtract => enhance
1509
+ blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
1510
+ subtracted = cv2.absdiff(image_8u, blurred)
1511
+
1512
+ # 4) Otsu's threshold on 'subtracted'
1513
+ otsu_thresh_val, _ = cv2.threshold(subtracted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
1514
+ # Scale it down if we want to detect more/fainter stars
1515
+ final_thresh_val = int(otsu_thresh_val * threshold_factor)
1516
+ if final_thresh_val < 2:
1517
+ final_thresh_val = 2 # avoid going below 2
1518
+
1519
+ # 5) Apply threshold
1520
+ _, thresh = cv2.threshold(subtracted, final_thresh_val, 255, cv2.THRESH_BINARY)
1521
+
1522
+ # 6) (Optional) Morphological opening to remove single-pixel noise
1523
+ # Adjust kernel size if you get too many/few stars
1524
+ kernel = np.ones((2, 2), np.uint8)
1525
+ thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
1526
+
1527
+ # 7) Find contours
1528
+ contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1529
+
1530
+ # 8) Filter contours by area, fit ellipse => compute eccentricity
1531
+ star_count = 0
1532
+ ecc_values = []
1533
+ for c in contours:
1534
+ area = cv2.contourArea(c)
1535
+ if area < min_area or area > max_area:
1536
+ continue
1537
+
1538
+ if len(c) < 5:
1539
+ continue # Need >=5 points to fit an ellipse
1540
+
1541
+ # Fit ellipse
1542
+ ellipse = cv2.fitEllipse(c)
1543
+ (cx, cy), (major_axis, minor_axis), angle = ellipse
1544
+
1545
+ # major_axis >= minor_axis
1546
+ if minor_axis > major_axis:
1547
+ major_axis, minor_axis = minor_axis, major_axis
1548
+
1549
+ if major_axis > 0:
1550
+ ecc = math.sqrt(1.0 - (minor_axis**2 / major_axis**2))
1551
+ else:
1552
+ ecc = 0.0
1553
+
1554
+ ecc_values.append(ecc)
1555
+ star_count += 1
1556
+
1557
+ if star_count > 0:
1558
+ avg_ecc = float(np.mean(ecc_values))
1559
+ else:
1560
+ avg_ecc = 0.0
1561
+
1562
+ return star_count, avg_ecc
1563
+
1564
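Step 4 above turns each fitted ellipse into an eccentricity, e = sqrt(1 - (minor/major)^2): 0 means a perfectly round star, values approaching 1 mean elongation from trailing, tilt, or poor guiding. For example, with illustrative axis lengths:

import math

major, minor = 6.0, 4.0                                               # fitted axes in pixels
print(round(math.sqrt(1.0 - (minor * minor) / (major * major)), 3))   # 0.745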
+ @njit(parallel=True, fastmath=True, cache=True)
1565
+ def normalize_images_3d(stack, ref_median):
1566
+ """
1567
+ Normalizes each frame in a 3D mono stack (F,H,W)
1568
+ so that its median equals ref_median.
1569
+
1570
+ Returns a 3D result (F,H,W).
1571
+ """
1572
+ num_frames, height, width = stack.shape
1573
+ normalized_stack = np.zeros_like(stack, dtype=np.float32)
1574
+
1575
+ for i in prange(num_frames):
1576
+ # shape of one frame: (H,W)
1577
+ img = stack[i]
1578
+ img_median = np.median(img)
1579
+
1580
+ # Prevent division by zero
1581
+ scale_factor = ref_median / max(img_median, 1e-6)
1582
+ # Scale the entire 2D frame
1583
+ normalized_stack[i] = img * scale_factor
1584
+
1585
+ return normalized_stack
1586
+
1587
+ @njit(parallel=True, fastmath=True, cache=True)
1588
+ def normalize_images_4d(stack, ref_median):
1589
+ """
1590
+ Normalizes each frame in a 4D color stack (F,H,W,C)
1591
+ so that its median equals ref_median.
1592
+
1593
+ Returns a 4D result (F,H,W,C).
1594
+ """
1595
+ num_frames, height, width, channels = stack.shape
1596
+ normalized_stack = np.zeros_like(stack, dtype=np.float32)
1597
+
1598
+ for i in prange(num_frames):
1599
+ # shape of one frame: (H,W,C)
1600
+ img = stack[i] # (H,W,C)
1601
+ # Flatten to 1D to compute median across all channels/pixels
1602
+ img_median = np.median(img.ravel())
1603
+
1604
+ # Prevent division by zero
1605
+ scale_factor = ref_median / max(img_median, 1e-6)
1606
+
1607
+ # Scale the entire 3D frame
1608
+ for y in range(height):
1609
+ for x in range(width):
1610
+ for c in range(channels):
1611
+ normalized_stack[i, y, x, c] = img[y, x, c] * scale_factor
1612
+
1613
+ return normalized_stack
1614
+
1615
+ def normalize_images(stack, ref_median):
1616
+ """
1617
+ Dispatcher that calls either the 3D or 4D specialized Numba function
1618
+ depending on 'stack.ndim'.
1619
+
1620
+ - If stack.ndim == 3, we assume shape (F,H,W).
1621
+ - If stack.ndim == 4, we assume shape (F,H,W,C).
1622
+ """
1623
+ if stack.ndim == 3:
1624
+ return normalize_images_3d(stack, ref_median)
1625
+ elif stack.ndim == 4:
1626
+ return normalize_images_4d(stack, ref_median)
1627
+ else:
1628
+ raise ValueError(f"normalize_images: stack must be 3D or 4D, got shape {stack.shape}")
1629
+
1630
+
1631
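Usage sketch: each frame is scaled by ref_median / frame_median, so after normalization every frame shares the reference median. Synthetic example, assuming the functions above are in scope:

import numpy as np

stack = np.stack([np.full((32, 32), m, dtype=np.float32) for m in (0.04, 0.05, 0.08)])
ref_median = float(np.median(stack[0]))            # 0.04

normalized = normalize_images(stack, ref_median)
print([float(np.median(f)) for f in normalized])   # all ~0.04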
+ @njit(parallel=True, fastmath=True, cache=True)
1632
+ def _edge_aware_interpolate_numba(out):
1633
+ """
1634
+ For each pixel in out (shape: (H,W,3)) where out[y,x,c] == 0,
1635
+ use a simple edge-aware approach:
1636
+ 1) Compute horizontal gradient = abs( left - right )
1637
+ 2) Compute vertical gradient = abs( top - bottom )
1638
+ 3) Choose the direction with the smaller gradient => average neighbors
1639
+ 4) If neighbors are missing or zero, fallback to a small 3x3 average
1640
+
1641
+ This is simpler than AHD but usually better than naive bilinear
1642
+ for high-contrast features like star cores.
1643
+ """
1644
+ H, W, C = out.shape
1645
+
1646
+ for c in range(C):
1647
+ for y in prange(H):
1648
+ for x in range(W):
1649
+ if out[y, x, c] == 0:
1650
+ # Gather immediate neighbors
1651
+ left = 0.0
1652
+ right = 0.0
1653
+ top = 0.0
1654
+ bottom = 0.0
1655
+ have_left = False
1656
+ have_right = False
1657
+ have_top = False
1658
+ have_bottom = False
1659
+
1660
+ # Left
1661
+ if x - 1 >= 0:
1662
+ val = out[y, x - 1, c]
1663
+ if val != 0:
1664
+ left = val
1665
+ have_left = True
1666
+
1667
+ # Right
1668
+ if x + 1 < W:
1669
+ val = out[y, x + 1, c]
1670
+ if val != 0:
1671
+ right = val
1672
+ have_right = True
1673
+
1674
+ # Top
1675
+ if y - 1 >= 0:
1676
+ val = out[y - 1, x, c]
1677
+ if val != 0:
1678
+ top = val
1679
+ have_top = True
1680
+
1681
+ # Bottom
1682
+ if y + 1 < H:
1683
+ val = out[y + 1, x, c]
1684
+ if val != 0:
1685
+ bottom = val
1686
+ have_bottom = True
1687
+
1688
+ # Compute gradients
1689
+ # If we don't have valid neighbors for that direction,
1690
+ # set the gradient to a large number => won't be chosen
1691
+ gh = 1e6
1692
+ gv = 1e6
1693
+
1694
+ if have_left and have_right:
1695
+ gh = abs(left - right)
1696
+ if have_top and have_bottom:
1697
+ gv = abs(top - bottom)
1698
+
1699
+ # Decide which direction to interpolate
1700
+ if gh < gv and have_left and have_right:
1701
+ # Horizontal interpolation
1702
+ out[y, x, c] = 0.5 * (left + right)
1703
+ elif gv <= gh and have_top and have_bottom:
1704
+ # Vertical interpolation
1705
+ out[y, x, c] = 0.5 * (top + bottom)
1706
+ else:
1707
+ # Fallback: average 3×3 region
1708
+ sumv = 0.0
1709
+ count = 0
1710
+ for dy in range(-1, 2):
1711
+ for dx in range(-1, 2):
1712
+ yy = y + dy
1713
+ xx = x + dx
1714
+ if 0 <= yy < H and 0 <= xx < W:
1715
+ val = out[yy, xx, c]
1716
+ if val != 0:
1717
+ sumv += val
1718
+ count += 1
1719
+ if count > 0:
1720
+ out[y, x, c] = sumv / count
1721
+
1722
+ return out
1723
+ # === Separate Full-Resolution Demosaicing Kernels ===
1724
+ # These njit functions assume the raw image is arranged in a Bayer pattern
1725
+ # and that we want a full (H,W,3) output.
1726
+
1727
+ @njit(parallel=True, fastmath=True, cache=True)
1728
+ def debayer_RGGB_fullres_fast(image):
1729
+ """
1730
+ For an RGGB pattern:
1731
+ - Even rows: even cols = Red, odd cols = Green.
1732
+ - Odd rows: even cols = Green, odd cols = Blue.
1733
+ """
1734
+ H, W = image.shape
1735
+ out = np.zeros((H, W, 3), dtype=image.dtype)
1736
+ for y in prange(H):
1737
+ for x in range(W):
1738
+ if (y & 1) == 0:
1739
+ if (x & 1) == 0:
1740
+ # Even row, even col: Red
1741
+ out[y, x, 0] = image[y, x]
1742
+ else:
1743
+ # Even row, odd col: Green
1744
+ out[y, x, 1] = image[y, x]
1745
+ else:
1746
+ if (x & 1) == 0:
1747
+ # Odd row, even col: Green
1748
+ out[y, x, 1] = image[y, x]
1749
+ else:
1750
+ # Odd row, odd col: Blue
1751
+ out[y, x, 2] = image[y, x]
1752
+ _edge_aware_interpolate_numba(out)
1753
+ return out
1754
+
1755
+ @njit(parallel=True, fastmath=True, cache=True)
1756
+ def debayer_BGGR_fullres_fast(image):
1757
+ """
1758
+ For a BGGR pattern:
1759
+ - Even rows: even cols = Blue, odd cols = Green.
1760
+ - Odd rows: even cols = Green, odd cols = Red.
1761
+ """
1762
+ H, W = image.shape
1763
+ out = np.zeros((H, W, 3), dtype=image.dtype)
1764
+ for y in prange(H):
1765
+ for x in range(W):
1766
+ if (y & 1) == 0:
1767
+ if (x & 1) == 0:
1768
+ # Even row, even col: Blue
1769
+ out[y, x, 2] = image[y, x]
1770
+ else:
1771
+ # Even row, odd col: Green
1772
+ out[y, x, 1] = image[y, x]
1773
+ else:
1774
+ if (x & 1) == 0:
1775
+ # Odd row, even col: Green
1776
+ out[y, x, 1] = image[y, x]
1777
+ else:
1778
+ # Odd row, odd col: Red
1779
+ out[y, x, 0] = image[y, x]
1780
+ _edge_aware_interpolate_numba(out)
1781
+ return out
1782
+
1783
+ @njit(parallel=True, fastmath=True, cache=True)
1784
+ def debayer_GRBG_fullres_fast(image):
1785
+ """
1786
+ For a GRBG pattern:
1787
+ - Even rows: even cols = Green, odd cols = Red.
1788
+ - Odd rows: even cols = Blue, odd cols = Green.
1789
+ """
1790
+ H, W = image.shape
1791
+ out = np.zeros((H, W, 3), dtype=image.dtype)
1792
+ for y in prange(H):
1793
+ for x in range(W):
1794
+ if (y & 1) == 0:
1795
+ if (x & 1) == 0:
1796
+ # Even row, even col: Green
1797
+ out[y, x, 1] = image[y, x]
1798
+ else:
1799
+ # Even row, odd col: Red
1800
+ out[y, x, 0] = image[y, x]
1801
+ else:
1802
+ if (x & 1) == 0:
1803
+ # Odd row, even col: Blue
1804
+ out[y, x, 2] = image[y, x]
1805
+ else:
1806
+ # Odd row, odd col: Green
1807
+ out[y, x, 1] = image[y, x]
1808
+ _edge_aware_interpolate_numba(out)
1809
+ return out
1810
+
1811
+ @njit(parallel=True, fastmath=True, cache=True)
1812
+ def debayer_GBRG_fullres_fast(image):
1813
+ """
1814
+ For a GBRG pattern:
1815
+ - Even rows: even cols = Green, odd cols = Blue.
1816
+ - Odd rows: even cols = Red, odd cols = Green.
1817
+ """
1818
+ H, W = image.shape
1819
+ out = np.zeros((H, W, 3), dtype=image.dtype)
1820
+ for y in prange(H):
1821
+ for x in range(W):
1822
+ if (y & 1) == 0:
1823
+ if (x & 1) == 0:
1824
+ # Even row, even col: Green
1825
+ out[y, x, 1] = image[y, x]
1826
+ else:
1827
+ # Even row, odd col: Blue
1828
+ out[y, x, 2] = image[y, x]
1829
+ else:
1830
+ if (x & 1) == 0:
1831
+ # Odd row, even col: Red
1832
+ out[y, x, 0] = image[y, x]
1833
+ else:
1834
+ # Odd row, odd col: Green
1835
+ out[y, x, 1] = image[y, x]
1836
+ _edge_aware_interpolate_numba(out)
1837
+ return out
1838
+
1839
+ # === Python-Level Dispatch Function ===
1840
+ # Since Numba cannot easily compare strings in nopython mode,
1841
+ # we do the if/elif check here in Python and then call the appropriate njit function.
1842
+
1843
+ def debayer_fits_fast(image_data, bayer_pattern):
1844
+ bp = bayer_pattern.upper()
1845
+ if bp == 'RGGB':
1846
+ return debayer_RGGB_fullres_fast(image_data)
1847
+ elif bp == 'BGGR':
1848
+ return debayer_BGGR_fullres_fast(image_data)
1849
+ elif bp == 'GRBG':
1850
+ return debayer_GRBG_fullres_fast(image_data)
1851
+ elif bp == 'GBRG':
1852
+ return debayer_GBRG_fullres_fast(image_data)
1853
+ else:
1854
+ raise ValueError(f"Unsupported Bayer pattern: {bayer_pattern}")
1855
+
1856
+ def debayer_raw_fast(raw_image_data, bayer_pattern="RGGB"):
1857
+ # For RAW images, use the same full-resolution demosaicing logic.
1858
+ return debayer_fits_fast(raw_image_data, bayer_pattern)
1859
+
1860
+
1861
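A minimal call of the dispatcher above on a stand-in mosaic; each kernel copies the raw samples into their color plane and the edge-aware pass fills the remaining zeros. Random data is used only to show the shapes involved:

import numpy as np

mosaic = np.random.rand(64, 64).astype(np.float32)   # stand-in for a raw RGGB frame
rgb = debayer_fits_fast(mosaic, "RGGB")
print(rgb.shape)                                      # (64, 64, 3)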
+ @njit(parallel=True, fastmath=True, cache=True)
1862
+ def applyPixelMath_numba(image_array, amount):
1863
+ factor = 3 ** amount
1864
+ denom_factor = 3 ** amount - 1
1865
+ height, width, channels = image_array.shape
1866
+ output = np.empty_like(image_array, dtype=np.float32)
1867
+
1868
+ for y in prange(height):
1869
+ for x in prange(width):
1870
+ for c in prange(channels):
1871
+ val = (factor * image_array[y, x, c]) / (denom_factor * image_array[y, x, c] + 1)
1872
+ output[y, x, c] = min(max(val, 0.0), 1.0) # Equivalent to np.clip()
1873
+
1874
+ return output
1875
+
1876
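The transfer curve above is out = (3^a * x) / ((3^a - 1) * x + 1); it fixes 0 and 1 and lifts the midtones, so for amount = 1 an input of 0.25 maps to 0.5. A tiny numeric check, assuming the function above is in scope:

import numpy as np

img = np.full((1, 1, 3), 0.25, dtype=np.float32)
print(float(applyPixelMath_numba(img, 1.0)[0, 0, 0]))   # 0.5 = 3*0.25 / (2*0.25 + 1)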
+ @njit(parallel=True, fastmath=True, cache=True)
1877
+ def adjust_saturation_numba(image_array, saturation_factor):
1878
+ height, width, channels = image_array.shape
1879
+ output = np.empty_like(image_array, dtype=np.float32)
1880
+
1881
+ for y in prange(int(height)): # Ensure y is an integer
1882
+ for x in prange(int(width)): # Ensure x is an integer
1883
+ r, g, b = image_array[int(y), int(x)] # Force integer indexing
1884
+
1885
+ # Convert RGB to HSV manually
1886
+ max_val = max(r, g, b)
1887
+ min_val = min(r, g, b)
1888
+ delta = max_val - min_val
1889
+
1890
+ # Compute Hue (H)
1891
+ if delta == 0:
1892
+ h = 0
1893
+ elif max_val == r:
1894
+ h = (60 * ((g - b) / delta) + 360) % 360
1895
+ elif max_val == g:
1896
+ h = (60 * ((b - r) / delta) + 120) % 360
1897
+ else:
1898
+ h = (60 * ((r - g) / delta) + 240) % 360
1899
+
1900
+ # Compute Saturation (S)
1901
+ s = (delta / max_val) if max_val != 0 else 0
1902
+ s *= saturation_factor # Apply saturation adjustment
1903
+ s = min(max(s, 0.0), 1.0) # Clip saturation
1904
+
1905
+ # Convert back to RGB
1906
+ if s == 0:
1907
+ r, g, b = max_val, max_val, max_val
1908
+ else:
1909
+ c = s * max_val
1910
+ x_val = c * (1 - abs((h / 60) % 2 - 1))
1911
+ m = max_val - c
1912
+
1913
+ if 0 <= h < 60:
1914
+ r, g, b = c, x_val, 0
1915
+ elif 60 <= h < 120:
1916
+ r, g, b = x_val, c, 0
1917
+ elif 120 <= h < 180:
1918
+ r, g, b = 0, c, x_val
1919
+ elif 180 <= h < 240:
1920
+ r, g, b = 0, x_val, c
1921
+ elif 240 <= h < 300:
1922
+ r, g, b = x_val, 0, c
1923
+ else:
1924
+ r, g, b = c, 0, x_val
1925
+
1926
+ r, g, b = r + m, g + m, b + m # Add m to shift brightness
1927
+
1928
+ # ✅ Fix: Explicitly cast indices to integers
1929
+ output[int(y), int(x), 0] = r
1930
+ output[int(y), int(x), 1] = g
1931
+ output[int(y), int(x), 2] = b
1932
+
1933
+ return output
1934
+
1935
+
1936
+
1937
+
1938
+ @njit(parallel=True, fastmath=True, cache=True)
1939
+ def applySCNR_numba(image_array):
1940
+ height, width, _ = image_array.shape
1941
+ output = np.empty_like(image_array, dtype=np.float32)
1942
+
1943
+ for y in prange(int(height)):
1944
+ for x in prange(int(width)):
1945
+ r, g, b = image_array[y, x]
1946
+ g = min(g, (r + b) / 2) # Reduce green to the average of red & blue
1947
+
1948
+ # ✅ Fix: Assign channels individually instead of a tuple
1949
+ output[int(y), int(x), 0] = r
1950
+ output[int(y), int(x), 1] = g
1951
+ output[int(y), int(x), 2] = b
1952
+
1953
+
1954
+ return output
1955
+
1956
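This is the "average neutral" SCNR rule: green is capped at the mean of red and blue, so a green-dominated pixel such as (0.2, 0.8, 0.4) becomes (0.2, 0.3, 0.4) while already-neutral pixels pass through unchanged. For example:

import numpy as np

px = np.array([[[0.2, 0.8, 0.4]]], dtype=np.float32)   # strongly green pixel
print(applySCNR_numba(px)[0, 0])                        # [0.2, 0.3, 0.4]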
+ # D65 reference
1957
+ _Xn, _Yn, _Zn = 0.95047, 1.00000, 1.08883
1958
+
1959
+ # Matrix for RGB -> XYZ (sRGB => D65)
1960
+ _M_rgb2xyz = np.array([
1961
+ [0.4124564, 0.3575761, 0.1804375],
1962
+ [0.2126729, 0.7151522, 0.0721750],
1963
+ [0.0193339, 0.1191920, 0.9503041]
1964
+ ], dtype=np.float32)
1965
+
1966
+ # Matrix for XYZ -> RGB (sRGB => D65)
1967
+ _M_xyz2rgb = np.array([
1968
+ [ 3.2404542, -1.5371385, -0.4985314],
1969
+ [-0.9692660, 1.8760108, 0.0415560],
1970
+ [ 0.0556434, -0.2040259, 1.0572252]
1971
+ ], dtype=np.float32)
1972
+
1973
+
1974
+
1975
+ @njit(parallel=True, fastmath=True, cache=True)
1976
+ def apply_lut_gray(image_in, lut):
1977
+ """
1978
+ Numba-accelerated application of 'lut' to a single-channel image_in in [0..1].
1979
+ 'lut' is a 1D array of shape (size,) also in [0..1].
1980
+ """
1981
+ out = np.empty_like(image_in)
1982
+ height, width = image_in.shape
1983
+ size_lut = len(lut) - 1
1984
+
1985
+ for y in prange(height):
1986
+ for x in range(width):
1987
+ v = image_in[y, x]
1988
+ idx = int(v * size_lut + 0.5)
1989
+ if idx < 0: idx = 0
1990
+ elif idx > size_lut: idx = size_lut
1991
+ out[y, x] = lut[idx]
1992
+
1993
+ return out
1994
+
1995
+ @njit(parallel=True, fastmath=True, cache=True)
1996
+ def apply_lut_color(image_in, lut):
1997
+ """
1998
+ Numba-accelerated application of 'lut' to a 3-channel image_in in [0..1].
1999
+ 'lut' is a 1D array of shape (size,) also in [0..1].
2000
+ """
2001
+ out = np.empty_like(image_in)
2002
+ height, width, channels = image_in.shape
2003
+ size_lut = len(lut) - 1
2004
+
2005
+ for y in prange(height):
2006
+ for x in range(width):
2007
+ for c in range(channels):
2008
+ v = image_in[y, x, c]
2009
+ idx = int(v * size_lut + 0.5)
2010
+ if idx < 0: idx = 0
2011
+ elif idx > size_lut: idx = size_lut
2012
+ out[y, x, c] = lut[idx]
2013
+
2014
+ return out
2015
+
2016
+ @njit(parallel=True, fastmath=True, cache=True)
2017
+ def apply_lut_mono_inplace(array2d, lut):
2018
+ """
2019
+ In-place LUT application on a single-channel 2D array in [0..1].
2020
+ 'lut' has shape (size,) also in [0..1].
2021
+ """
2022
+ H, W = array2d.shape
2023
+ size_lut = len(lut) - 1
2024
+ for y in prange(H):
2025
+ for x in prange(W):
2026
+ v = array2d[y, x]
2027
+ idx = int(v * size_lut + 0.5)
2028
+ if idx < 0:
2029
+ idx = 0
2030
+ elif idx > size_lut:
2031
+ idx = size_lut
2032
+ array2d[y, x] = lut[idx]
2033
+
2034
+ @njit(parallel=True, fastmath=True, cache=True)
2035
+ def apply_lut_color_inplace(array3d, lut):
2036
+ """
2037
+ In-place LUT application on a 3-channel array in [0..1].
2038
+ 'lut' has shape (size,) also in [0..1].
2039
+ """
2040
+ H, W, C = array3d.shape
2041
+ size_lut = len(lut) - 1
2042
+ for y in prange(H):
2043
+ for x in prange(W):
2044
+ for c in range(C):
2045
+ v = array3d[y, x, c]
2046
+ idx = int(v * size_lut + 0.5)
2047
+ if idx < 0:
2048
+ idx = 0
2049
+ elif idx > size_lut:
2050
+ idx = size_lut
2051
+ array3d[y, x, c] = lut[idx]
2052
+
2053
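The LUT helpers above do a nearest-entry lookup, idx = round(v * (len(lut) - 1)), so any monotonic curve sampled on [0, 1] can be applied in one pass. A sketch applying a simple gamma-style curve; the 65536-entry table and the 1/2.2 exponent are just illustrative choices:

import numpy as np

lut = np.linspace(0.0, 1.0, 65536, dtype=np.float32) ** np.float32(1.0 / 2.2)
img = np.random.rand(32, 32, 3).astype(np.float32)

out = apply_lut_color(img, lut)      # returns a new array
apply_lut_color_inplace(img, lut)    # or modifies img in place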
+ @njit(parallel=True, fastmath=True, cache=True)
2054
+ def rgb_to_xyz_numba(rgb):
2055
+ """
2056
+ Convert an image from sRGB to XYZ (D65).
2057
+ rgb: float32 array in [0..1], shape (H,W,3)
2058
+ returns xyz in [0..maybe >1], shape (H,W,3)
2059
+ """
2060
+ H, W, _ = rgb.shape
2061
+ out = np.empty((H, W, 3), dtype=np.float32)
2062
+ for y in prange(H):
2063
+ for x in prange(W):
2064
+ r = rgb[y, x, 0]
2065
+ g = rgb[y, x, 1]
2066
+ b = rgb[y, x, 2]
2067
+ # Multiply by M_rgb2xyz
2068
+ X = _M_rgb2xyz[0,0]*r + _M_rgb2xyz[0,1]*g + _M_rgb2xyz[0,2]*b
2069
+ Y = _M_rgb2xyz[1,0]*r + _M_rgb2xyz[1,1]*g + _M_rgb2xyz[1,2]*b
2070
+ Z = _M_rgb2xyz[2,0]*r + _M_rgb2xyz[2,1]*g + _M_rgb2xyz[2,2]*b
2071
+ out[y, x, 0] = X
2072
+ out[y, x, 1] = Y
2073
+ out[y, x, 2] = Z
2074
+ return out
2075
+
2076
+ @njit(parallel=True, fastmath=True, cache=True)
2077
+ def xyz_to_rgb_numba(xyz):
2078
+ """
2079
+ Convert an image from XYZ (D65) to sRGB.
2080
+ xyz: float32 array, shape (H,W,3)
2081
+ returns rgb in [0..1], shape (H,W,3)
2082
+ """
2083
+ H, W, _ = xyz.shape
2084
+ out = np.empty((H, W, 3), dtype=np.float32)
2085
+ for y in prange(H):
2086
+ for x in prange(W):
2087
+ X = xyz[y, x, 0]
2088
+ Y = xyz[y, x, 1]
2089
+ Z = xyz[y, x, 2]
2090
+ # Multiply by M_xyz2rgb
2091
+ r = _M_xyz2rgb[0,0]*X + _M_xyz2rgb[0,1]*Y + _M_xyz2rgb[0,2]*Z
2092
+ g = _M_xyz2rgb[1,0]*X + _M_xyz2rgb[1,1]*Y + _M_xyz2rgb[1,2]*Z
2093
+ b = _M_xyz2rgb[2,0]*X + _M_xyz2rgb[2,1]*Y + _M_xyz2rgb[2,2]*Z
2094
+ # Clip to [0..1]
2095
+ if r < 0: r = 0
2096
+ elif r > 1: r = 1
2097
+ if g < 0: g = 0
2098
+ elif g > 1: g = 1
2099
+ if b < 0: b = 0
2100
+ elif b > 1: b = 1
2101
+ out[y, x, 0] = r
2102
+ out[y, x, 1] = g
2103
+ out[y, x, 2] = b
2104
+ return out
2105
+
2106
+ @njit(cache=True)
2107
+ def f_lab_numba(t):
2108
+ delta = 6/29
2109
+ out = np.empty_like(t, dtype=np.float32)
2110
+ for i in range(t.size):
2111
+ val = t.flat[i]
2112
+ if val > delta**3:
2113
+ out.flat[i] = val**(1/3)
2114
+ else:
2115
+ out.flat[i] = val/(3*delta*delta) + (4/29)
2116
+ return out
2117
+
2118
+ @njit(parallel=True, fastmath=True, cache=True)
2119
+ def xyz_to_lab_numba(xyz):
2120
+ """
2121
+ xyz => shape(H,W,3), in D65.
2122
+ returns lab in shape(H,W,3): L in [0..100], a,b in ~[-128..127].
2123
+ """
2124
+ H, W, _ = xyz.shape
2125
+ out = np.empty((H,W,3), dtype=np.float32)
2126
+ for y in prange(H):
2127
+ for x in prange(W):
2128
+ X = xyz[y, x, 0] / _Xn
2129
+ Y = xyz[y, x, 1] / _Yn
2130
+ Z = xyz[y, x, 2] / _Zn
2131
+ fx = (X)**(1/3) if X > (6/29)**3 else X/(3*(6/29)**2) + 4/29
2132
+ fy = (Y)**(1/3) if Y > (6/29)**3 else Y/(3*(6/29)**2) + 4/29
2133
+ fz = (Z)**(1/3) if Z > (6/29)**3 else Z/(3*(6/29)**2) + 4/29
2134
+ L = 116*fy - 16
2135
+ a = 500*(fx - fy)
2136
+ b = 200*(fy - fz)
2137
+ out[y, x, 0] = L
2138
+ out[y, x, 1] = a
2139
+ out[y, x, 2] = b
2140
+ return out
2141
+
2142
+ @njit(parallel=True, fastmath=True, cache=True)
2143
+ def lab_to_xyz_numba(lab):
2144
+ """
2145
+ lab => shape(H,W,3): L in [0..100], a,b in ~[-128..127].
2146
+ returns xyz shape(H,W,3).
2147
+ """
2148
+ H, W, _ = lab.shape
2149
+ out = np.empty((H,W,3), dtype=np.float32)
2150
+ delta = 6/29
2151
+ for y in prange(H):
2152
+ for x in prange(W):
2153
+ L = lab[y, x, 0]
2154
+ a = lab[y, x, 1]
2155
+ b = lab[y, x, 2]
2156
+ fy = (L+16)/116
2157
+ fx = fy + a/500
2158
+ fz = fy - b/200
2159
+
2160
+ if fx > delta:
2161
+ xr = fx**3
2162
+ else:
2163
+ xr = 3*delta*delta*(fx - 4/29)
2164
+ if fy > delta:
2165
+ yr = fy**3
2166
+ else:
2167
+ yr = 3*delta*delta*(fy - 4/29)
2168
+ if fz > delta:
2169
+ zr = fz**3
2170
+ else:
2171
+ zr = 3*delta*delta*(fz - 4/29)
2172
+
2173
+ X = _Xn * xr
2174
+ Y = _Yn * yr
2175
+ Z = _Zn * zr
2176
+ out[y, x, 0] = X
2177
+ out[y, x, 1] = Y
2178
+ out[y, x, 2] = Z
2179
+ return out
2180
+
2181
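A round-trip sketch through the conversions above (RGB -> XYZ -> Lab and back); the matrices are the standard sRGB/D65 pair, so the reconstruction should match the input up to float32 round-off and the precision of the inverse matrix. Assumes the functions above are in scope:

import numpy as np

rgb = np.random.rand(8, 8, 3).astype(np.float32)
lab = xyz_to_lab_numba(rgb_to_xyz_numba(rgb))
back = xyz_to_rgb_numba(lab_to_xyz_numba(lab))
print(float(np.max(np.abs(back - rgb))))   # small residual, on the order of 1e-3 or less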
+ @njit(parallel=True, fastmath=True, cache=True)
2182
+ def rgb_to_hsv_numba(rgb):
2183
+ H, W, _ = rgb.shape
2184
+ out = np.empty((H,W,3), dtype=np.float32)
2185
+ for y in prange(H):
2186
+ for x in prange(W):
2187
+ r = rgb[y,x,0]
2188
+ g = rgb[y,x,1]
2189
+ b = rgb[y,x,2]
2190
+ cmax = max(r,g,b)
2191
+ cmin = min(r,g,b)
2192
+ delta = cmax - cmin
2193
+ # Hue
2194
+ h = 0.0
2195
+ if delta != 0.0:
2196
+ if cmax == r:
2197
+ h = 60*(((g-b)/delta) % 6)
2198
+ elif cmax == g:
2199
+ h = 60*(((b-r)/delta) + 2)
2200
+ else:
2201
+ h = 60*(((r-g)/delta) + 4)
2202
+ # Saturation
2203
+ s = 0.0
2204
+ if cmax > 0.0:
2205
+ s = delta / cmax
2206
+ v = cmax
2207
+ out[y,x,0] = h
2208
+ out[y,x,1] = s
2209
+ out[y,x,2] = v
2210
+ return out
2211
+
2212
+ @njit(parallel=True, fastmath=True, cache=True)
2213
+ def hsv_to_rgb_numba(hsv):
2214
+ H, W, _ = hsv.shape
2215
+ out = np.empty((H,W,3), dtype=np.float32)
2216
+ for y in prange(H):
2217
+ for x in prange(W):
2218
+ h = hsv[y,x,0]
2219
+ s = hsv[y,x,1]
2220
+ v = hsv[y,x,2]
2221
+ c = v*s
2222
+ hh = (h/60.0) % 6
2223
+ x_ = c*(1 - abs(hh % 2 - 1))
2224
+ m = v - c
2225
+ r = 0.0
2226
+ g = 0.0
2227
+ b = 0.0
2228
+ if 0 <= hh < 1:
2229
+ r,g,b = c,x_,0
2230
+ elif 1 <= hh < 2:
2231
+ r,g,b = x_,c,0
2232
+ elif 2 <= hh < 3:
2233
+ r,g,b = 0,c,x_
2234
+ elif 3 <= hh < 4:
2235
+ r,g,b = 0,x_,c
2236
+ elif 4 <= hh < 5:
2237
+ r,g,b = x_,0,c
2238
+ else:
2239
+ r,g,b = c,0,x_
2240
+ out[y,x,0] = (r + m)
2241
+ out[y,x,1] = (g + m)
2242
+ out[y,x,2] = (b + m)
2243
+ return out
2244
+
2245
+ @njit(parallel=True, fastmath=True, cache=True)
2246
+ def _cosmetic_correction_numba_fixed(corrected, H, W, C, hot_sigma, cold_sigma):
2247
+ """
2248
+ Optimized Numba-compiled local outlier correction.
2249
+         - Computes the median and a MAD-based sigma estimate from the 3x3 neighborhood (the center pixel is included).
2250
+ - If the center pixel is greater than (median + hot_sigma * std_dev), it is replaced with the median.
2251
+ - If the center pixel is less than (median - cold_sigma * std_dev), it is replaced with the median.
2252
+ - Edge pixels are skipped (avoiding padding artifacts).
2253
+ """
2254
+         # The 3x3 window buffer is allocated per pixel inside the parallel loop so threads never share it.
2255
+
2256
+ # Process pixels in parallel, skipping edges
2257
+ for y in prange(1, H - 1): # Skip first and last rows
2258
+ for x in range(1, W - 1): # Skip first and last columns
2259
+ # If the image is grayscale, set C=1 and handle accordingly
2260
+ for c_i in prange(C if corrected.ndim == 3 else 1):
2261
+                     local_vals = np.empty(9, dtype=np.float32)  # 3x3 window values (center included), private to this pixel
+                     k = 0
2262
+ for dy in range(-1, 2): # -1, 0, +1
2263
+ for dx in range(-1, 2): # -1, 0, +1
2264
+ if corrected.ndim == 3: # Color image
2265
+ local_vals[k] = corrected[y + dy, x + dx, c_i]
2266
+ else: # Grayscale image
2267
+ local_vals[k] = corrected[y + dy, x + dx]
2268
+ k += 1
2269
+
2270
+ # Compute median
2271
+ M = np.median(local_vals)
2272
+
2273
+ # Compute MAD manually
2274
+ abs_devs = np.abs(local_vals - M)
2275
+ MAD = np.median(abs_devs)
2276
+
2277
+ # Convert MAD to an approximation of standard deviation
2278
+ sigma_mad = 1.4826 * MAD
2279
+
2280
+ # Get center pixel
2281
+ if corrected.ndim == 3:
2282
+ T = corrected[y, x, c_i]
2283
+ else:
2284
+ T = corrected[y, x]
2285
+
2286
+ threshold_high = M + (hot_sigma * sigma_mad)
2287
+ threshold_low = M - (cold_sigma * sigma_mad)
2288
+
2289
+ # **Apply correction ONLY if center pixel is an outlier**
2290
+ if T > threshold_high or T < threshold_low:
2291
+ if corrected.ndim == 3:
2292
+ corrected[y, x, c_i] = M # Replace center pixel in color image
2293
+ else:
2294
+ corrected[y, x] = M # Replace center pixel in grayscale image
2295
+
2296
+ def bulk_cosmetic_correction_bayer(image, hot_sigma=5.0, cold_sigma=5.0):
2297
+ """
2298
+ Perform cosmetic correction on a single-channel Bayer mosaic.
2299
+ Assumes a default Bayer pattern "RGGB":
2300
+ - Red: even rows, even columns
2301
+ - Green1: even rows, odd columns
2302
+ - Green2: odd rows, even columns
2303
+ - Blue: odd rows, odd columns
2304
+ Applies cosmetic correction separately on each channel and reassembles them.
2305
+ """
2306
+ H, W = image.shape
2307
+ # Create a copy to hold the corrected image.
2308
+ corrected = image.astype(np.float32).copy()
2309
+
2310
+ # For each channel, extract the subarray and apply the standard correction.
2311
+ # We use your existing bulk_cosmetic_correction_numba function, which accepts a 2D array.
2312
+ # Red channel (even rows, even cols)
2313
+ red = corrected[0:H:2, 0:W:2]
2314
+ red_corrected = bulk_cosmetic_correction_numba(red, hot_sigma, cold_sigma)
2315
+ corrected[0:H:2, 0:W:2] = red_corrected
2316
+
2317
+ # Blue channel (odd rows, odd cols)
2318
+ blue = corrected[1:H:2, 1:W:2]
2319
+ blue_corrected = bulk_cosmetic_correction_numba(blue, hot_sigma, cold_sigma)
2320
+ corrected[1:H:2, 1:W:2] = blue_corrected
2321
+
2322
+ # Green channel: two sets:
2323
+ # Green1 (even rows, odd cols)
2324
+ green1 = corrected[0:H:2, 1:W:2]
2325
+ green1_corrected = bulk_cosmetic_correction_numba(green1, hot_sigma, cold_sigma)
2326
+ corrected[0:H:2, 1:W:2] = green1_corrected
2327
+
2328
+ # Green2 (odd rows, even cols)
2329
+ green2 = corrected[1:H:2, 0:W:2]
2330
+ green2_corrected = bulk_cosmetic_correction_numba(green2, hot_sigma, cold_sigma)
2331
+ corrected[1:H:2, 0:W:2] = green2_corrected
2332
+
2333
+ return corrected
2334
+
2335
+ def bulk_cosmetic_correction_numba(image, hot_sigma=3.0, cold_sigma=3.0, window_size=3):
2336
+ """
2337
+ Optimized local outlier correction using Numba.
2338
+ - Identifies hot and cold outliers based on local neighborhood statistics.
2339
+ - Uses median and standard deviation from surrounding pixels to detect and replace outliers.
2340
+ - Applies separate hot_sigma and cold_sigma thresholds.
2341
+ - Skips edge pixels to avoid padding artifacts.
2342
+ """
2343
+
2344
+ was_gray = False
2345
+
2346
+ if image.ndim == 2: # Convert grayscale to 3D
2347
+ H, W = image.shape
2348
+ C = 1
2349
+ was_gray = True
2350
+ image = image[:, :, np.newaxis] # Explicitly add a color channel dimension
2351
+
2352
+ else:
2353
+ H, W, C = image.shape
2354
+
2355
+ # Copy the image for modification
2356
+ corrected = image.astype(np.float32).copy()
2357
+
2358
+ # Apply fast correction (no padding, edges skipped)
2359
+ _cosmetic_correction_numba_fixed(corrected, H, W, C, hot_sigma, cold_sigma)
2360
+
2361
+ if was_gray:
2362
+ corrected = corrected[:, :, 0] # Convert back to 2D if originally grayscale
2363
+
2364
+ return corrected
2365
+
2366
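Usage sketch: a single simulated hot pixel in a flat, noisy frame is pulled back to its local median while the rest of the frame is left untouched. Synthetic data, assuming the function above is in scope:

import numpy as np

rng = np.random.default_rng(1)
frame = rng.normal(0.2, 0.01, size=(64, 64)).astype(np.float32)
frame[32, 32] = 1.0                                  # simulated hot pixel

fixed = bulk_cosmetic_correction_numba(frame, hot_sigma=5.0, cold_sigma=5.0)
print(float(frame[32, 32]), float(fixed[32, 32]))    # 1.0 -> roughly 0.2 (the local median)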
+ def evaluate_polynomial(H: int, W: int, coeffs: np.ndarray, degree: int) -> np.ndarray:
2367
+ """
2368
+ Evaluates the polynomial function over the entire image domain.
2369
+ """
2370
+ xx, yy = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing="xy")
2371
+ A_full = build_poly_terms(xx.ravel(), yy.ravel(), degree)
2372
+ return (A_full @ coeffs).reshape(H, W)
2373
+
2374
+
2375
+
2376
+ @njit(parallel=True, fastmath=True, cache=True)
2377
+ def numba_mono_final_formula(rescaled, median_rescaled, target_median):
2378
+ """
2379
+ Applies the final formula *after* we already have the rescaled values.
2380
+
2381
+ rescaled[y,x] = (original[y,x] - black_point) / (1 - black_point)
2382
+ median_rescaled = median(rescaled)
2383
+
2384
+ out_val = ((median_rescaled - 1) * target_median * r) /
2385
+ ( median_rescaled*(target_median + r -1) - target_median*r )
2386
+ """
2387
+ H, W = rescaled.shape
2388
+ out = np.empty_like(rescaled)
2389
+
2390
+ for y in prange(H):
2391
+ for x in range(W):
2392
+ r = rescaled[y, x]
2393
+ numer = (median_rescaled - 1.0) * target_median * r
2394
+ denom = median_rescaled * (target_median + r - 1.0) - target_median * r
2395
+ if np.abs(denom) < 1e-12:
2396
+ denom = 1e-12
2397
+ out[y, x] = numer / denom
2398
+
2399
+ return out
2400
+
2401
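The rational remap above pins 0 and 1 and sends the rescaled median exactly to target_median: substituting r = median_rescaled gives numerator (m - 1) * t * m and denominator m * (m - 1), which cancel to t. A quick check, assuming the function above is in scope:

import numpy as np

rescaled = np.array([[0.0, 0.1, 1.0]], dtype=np.float32)
print(numba_mono_final_formula(rescaled, 0.1, 0.25)[0])
# [0.0, 0.25, 1.0]: endpoints are fixed and the median value 0.1 lands on the target 0.25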
+ @njit(parallel=True, fastmath=True)
2402
+ def drizzle_deposit_numba_naive(image_data, affine_2x3, drizzle_buffer, coverage_buffer, scale, weight):
2403
+ """
2404
+ Naive drizzle deposit (point-to-point-ish) for Mono images.
2405
+ Maps input (x,y) -> output (u,v) via affine, deposits 'weight' at nearest integer pixel.
2406
+
2407
+ image_data: (H, W)
2408
+ affine_2x3: (2, 3) matrix mapping source->canvas
2409
+ drizzle_buffer: (Ho, Wo)
2410
+ coverage_buffer: (Ho, Wo)
2411
+ """
2412
+ H, W = image_data.shape
2413
+ Ho, Wo = drizzle_buffer.shape
2414
+
2415
+ # We iterate over source pixels
2416
+ for y in prange(H):
2417
+ for x in range(W):
2418
+ val = image_data[y, x]
2419
+ if val == 0:
2420
+ continue
2421
+
2422
+ # Project center of pixel (x, y)
2423
+ # u = a*x + b*y + tx
2424
+ # v = c*x + d*y + ty
2425
+
2426
+ u = affine_2x3[0, 0] * x + affine_2x3[0, 1] * y + affine_2x3[0, 2]
2427
+ v = affine_2x3[1, 0] * x + affine_2x3[1, 1] * y + affine_2x3[1, 2]
2428
+
2429
+ # Nearest neighbor deposit
2430
+ ui = int(round(u))
2431
+ vi = int(round(v))
2432
+
2433
+ if 0 <= ui < Wo and 0 <= vi < Ho:
2434
+ # Accumulate
2435
+ drizzle_buffer[vi, ui] += val * weight
2436
+ coverage_buffer[vi, ui] += weight
2437
+
2438
+ return drizzle_buffer, coverage_buffer
2439
+
2440
+
2441
+ @njit(parallel=True, fastmath=True)
2442
+ def drizzle_deposit_color_naive(image_data, affine_2x3, drizzle_buffer, coverage_buffer, scale, drop_shrink, weight):
2443
+ """
2444
+ Naive drizzle deposit for Color images (H,W,C).
2445
+ image_data: (H, W, C)
2446
+ affine_2x3: (2, 3)
2447
+ drizzle_buffer: (Ho, Wo, C)
2448
+ coverage_buffer: (Ho, Wo, C)
2449
+ """
2450
+ H, W, C = image_data.shape
2451
+ Ho, Wo, _ = drizzle_buffer.shape
2452
+
2453
+ for y in prange(H):
2454
+ for x in range(W):
2455
+ # Check if pixel has any data (simple check: if sum > 0 or checks per channel)
2456
+ # usually we just project.
2457
+
2458
+ u = affine_2x3[0, 0] * x + affine_2x3[0, 1] * y + affine_2x3[0, 2]
2459
+ v = affine_2x3[1, 0] * x + affine_2x3[1, 1] * y + affine_2x3[1, 2]
2460
+
2461
+ ui = int(round(u))
2462
+ vi = int(round(v))
2463
+
2464
+ if 0 <= ui < Wo and 0 <= vi < Ho:
2465
+ for c in range(C):
2466
+ val = image_data[y, x, c]
2467
+ drizzle_buffer[vi, ui, c] += val * weight
2468
+ coverage_buffer[vi, ui, c] += weight
2469
+
2470
+ return drizzle_buffer, coverage_buffer
2471
+
2472
+ @njit(parallel=True, fastmath=True, cache=True)
2473
+ def numba_color_final_formula_linked(rescaled, median_rescaled, target_median):
2474
+ """
2475
+ Linked color transform: we use one median_rescaled for all channels.
2476
+ rescaled: (H,W,3), already = (image - black_point)/(1 - black_point)
2477
+ median_rescaled = median of *all* pixels in rescaled
2478
+ """
2479
+ H, W, C = rescaled.shape
2480
+ out = np.empty_like(rescaled)
2481
+
2482
+ for y in prange(H):
2483
+ for x in range(W):
2484
+ for c in range(C):
2485
+ r = rescaled[y, x, c]
2486
+ numer = (median_rescaled - 1.0) * target_median * r
2487
+ denom = median_rescaled * (target_median + r - 1.0) - target_median * r
2488
+ if np.abs(denom) < 1e-12:
2489
+ denom = 1e-12
2490
+ out[y, x, c] = numer / denom
2491
+
2492
+ return out
2493
+
2494
+ @njit(parallel=True, fastmath=True, cache=True)
2495
+ def numba_color_final_formula_unlinked(rescaled, medians_rescaled, target_median):
2496
+ """
2497
+ Unlinked color transform: a separate median_rescaled per channel.
2498
+ rescaled: (H,W,3), where each channel is already (val - black_point[c]) / (1 - black_point[c])
2499
+ medians_rescaled: shape (3,) with median of each channel in the rescaled array.
2500
+ """
2501
+ H, W, C = rescaled.shape
2502
+ out = np.empty_like(rescaled)
2503
+
2504
+ for y in prange(H):
2505
+ for x in range(W):
2506
+ for c in range(C):
2507
+ r = rescaled[y, x, c]
2508
+ med = medians_rescaled[c]
2509
+ numer = (med - 1.0) * target_median * r
2510
+ denom = med * (target_median + r - 1.0) - target_median * r
2511
+ if np.abs(denom) < 1e-12:
2512
+ denom = 1e-12
2513
+ out[y, x, c] = numer / denom
2514
+
2515
+ return out
2516
+
2517
+
2518
+ def build_poly_terms(x_array: np.ndarray, y_array: np.ndarray, degree: int) -> np.ndarray:
2519
+ """
2520
+ Precomputes polynomial basis terms efficiently using NumPy, supporting up to degree 6.
2521
+ """
2522
+ ones = np.ones_like(x_array, dtype=np.float32)
2523
+
2524
+ if degree == 1:
2525
+ return np.column_stack((ones, x_array, y_array))
2526
+
2527
+ elif degree == 2:
2528
+ return np.column_stack((ones, x_array, y_array,
2529
+ x_array**2, x_array * y_array, y_array**2))
2530
+
2531
+ elif degree == 3:
2532
+ return np.column_stack((ones, x_array, y_array,
2533
+ x_array**2, x_array * y_array, y_array**2,
2534
+ x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3))
2535
+
2536
+ elif degree == 4:
2537
+ return np.column_stack((ones, x_array, y_array,
2538
+ x_array**2, x_array * y_array, y_array**2,
2539
+ x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
2540
+ x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4))
2541
+
2542
+ elif degree == 5:
2543
+ return np.column_stack((ones, x_array, y_array,
2544
+ x_array**2, x_array * y_array, y_array**2,
2545
+ x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
2546
+ x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4,
2547
+ x_array**5, x_array**4 * y_array, x_array**3 * y_array**2, x_array**2 * y_array**3, x_array * y_array**4, y_array**5))
2548
+
2549
+ elif degree == 6:
2550
+ return np.column_stack((ones, x_array, y_array,
2551
+ x_array**2, x_array * y_array, y_array**2,
2552
+ x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
2553
+ x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4,
2554
+ x_array**5, x_array**4 * y_array, x_array**3 * y_array**2, x_array**2 * y_array**3, x_array * y_array**4, y_array**5,
2555
+ x_array**6, x_array**5 * y_array, x_array**4 * y_array**2, x_array**3 * y_array**3, x_array**2 * y_array**4, x_array * y_array**5, y_array**6))
2556
+
2557
+ else:
2558
+ raise ValueError(f"Unsupported polynomial degree={degree}. Max supported is 6.")
2559
+
2560
+
2561
+
2562
+
2563
+ def generate_sample_points(image: np.ndarray, num_points: int = 100) -> np.ndarray:
2564
+ """
2565
+ Generates sample points uniformly across the image.
2566
+
2567
+ - Places points in a uniform grid (no randomization).
2568
+ - Avoids border pixels.
2569
+ - Skips any points with value 0.000 or above 0.85.
2570
+
2571
+ Returns:
2572
+ np.ndarray: Array of shape (N, 2) containing (x, y) coordinates of sample points.
2573
+ """
2574
+ H, W = image.shape[:2]
2575
+ points = []
2576
+
2577
+ # Create a uniform grid (avoiding the border)
2578
+ grid_size = int(np.sqrt(num_points)) # Roughly equal spacing
2579
+ x_vals = np.linspace(10, W - 10, grid_size, dtype=int) # Avoids border
2580
+ y_vals = np.linspace(10, H - 10, grid_size, dtype=int)
2581
+
2582
+ for y in y_vals:
2583
+ for x in x_vals:
2584
+ # Skip values that are too dark (0.000) or too bright (> 0.85)
2585
+ if np.any(image[int(y), int(x)] == 0.000) or np.any(image[int(y), int(x)] > 0.85):
2586
+ continue # Skip this pixel
2587
+
2588
+ points.append((int(x), int(y)))
2589
+
2590
+ if len(points) >= num_points:
2591
+ return np.array(points, dtype=np.int32) # Return only valid points
2592
+
2593
+ return np.array(points, dtype=np.int32) # Return all collected points
2594
+
2595
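One way the pieces above can be combined into a background model (a sketch only; the application's own gradient-removal routine may differ): sample the frame, fit the polynomial terms by least squares, then evaluate the surface over the whole image. The degree-1 fit and the synthetic gradient are illustrative choices:

import numpy as np

rng = np.random.default_rng(0)
H, W = 256, 256
yy, xx = np.mgrid[0:H, 0:W].astype(np.float32)
image = 0.2 + 1e-4 * xx + 5e-5 * yy + rng.normal(0.0, 0.002, (H, W)).astype(np.float32)

pts = generate_sample_points(image, num_points=100)        # (N, 2) of (x, y)
A = build_poly_terms(pts[:, 0].astype(np.float32), pts[:, 1].astype(np.float32), 1)
z = image[pts[:, 1], pts[:, 0]].astype(np.float32)

coeffs, *_ = np.linalg.lstsq(A, z, rcond=None)
background = evaluate_polynomial(H, W, coeffs, 1)
flattened = image - background + float(np.median(background))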
+ @njit(parallel=True, fastmath=True, cache=True)
2596
+ def numba_unstretch(image: np.ndarray, stretch_original_medians: np.ndarray, stretch_original_mins: np.ndarray) -> np.ndarray:
2597
+ """
2598
+ Numba-optimized function to undo the unlinked stretch.
2599
+ Restores each channel separately.
2600
+ """
2601
+ H, W, C = image.shape
2602
+ out = np.empty_like(image, dtype=np.float32)
2603
+
2604
+ for c in prange(C): # Parallelize per channel
2605
+ cmed_stretched = np.median(image[..., c])
2606
+ orig_med = stretch_original_medians[c]
2607
+ orig_min = stretch_original_mins[c]
2608
+
2609
+ if cmed_stretched != 0 and orig_med != 0:
2610
+ for y in prange(H):
2611
+ for x in range(W):
2612
+ r = image[y, x, c]
2613
+ numerator = (cmed_stretched - 1) * orig_med * r
2614
+ denominator = cmed_stretched * (orig_med + r - 1) - orig_med * r
2615
+ if denominator == 0:
2616
+ denominator = 1e-6 # Avoid division by zero
2617
+ out[y, x, c] = numerator / denominator
2618
+
2619
+ # Restore the original black point
2620
+ out[..., c] += orig_min
2621
+
2622
+ return np.clip(out, 0, 1) # Clip to valid range
2623
+
2624
+
2625
+ @njit(fastmath=True, cache=True)
2626
+ def drizzle_deposit_numba_naive(
2627
+ img_data, # shape (H, W), mono
2628
+ transform, # shape (2, 3), e.g. [[a,b,tx],[c,d,ty]]
2629
+ drizzle_buffer, # shape (outH, outW)
2630
+ coverage_buffer,# shape (outH, outW)
2631
+ drizzle_factor: float,
2632
+ frame_weight: float
2633
+ ):
2634
+ """
2635
+ Naive deposit: each input pixel is mapped to exactly one output pixel,
2636
+ ignoring drop_shrink. 2D single-channel version (mono).
2637
+ """
2638
+ h, w = img_data.shape
2639
+ out_h, out_w = drizzle_buffer.shape
2640
+
2641
+ # Build a 3×3 matrix M
2642
+ # transform is 2×3, so we expand to 3×3 for the standard [x, y, 1] approach
2643
+ M = np.zeros((3, 3), dtype=np.float32)
2644
+ M[0, 0] = transform[0, 0] # a
2645
+ M[0, 1] = transform[0, 1] # b
2646
+ M[0, 2] = transform[0, 2] # tx
2647
+ M[1, 0] = transform[1, 0] # c
2648
+ M[1, 1] = transform[1, 1] # d
2649
+ M[1, 2] = transform[1, 2] # ty
2650
+ M[2, 2] = 1.0
2651
+
2652
+ # We'll reuse a small input vector for each pixel
2653
+ in_coords = np.zeros(3, dtype=np.float32)
2654
+ in_coords[2] = 1.0
2655
+
2656
+ for y in range(h):
2657
+ for x in range(w):
2658
+ val = img_data[y, x]
2659
+ if val == 0:
2660
+ continue
2661
+
2662
+ # Fill the input vector
2663
+ in_coords[0] = x
2664
+ in_coords[1] = y
2665
+
2666
+ # Multiply
2667
+ out_coords = M @ in_coords
2668
+ X = out_coords[0]
2669
+ Y = out_coords[1]
2670
+
2671
+ # Multiply by drizzle_factor
2672
+ Xo = int(X * drizzle_factor)
2673
+ Yo = int(Y * drizzle_factor)
2674
+
2675
+ if 0 <= Xo < out_w and 0 <= Yo < out_h:
2676
+ drizzle_buffer[Yo, Xo] += val * frame_weight
2677
+ coverage_buffer[Yo, Xo] += frame_weight
2678
+
2679
+ return drizzle_buffer, coverage_buffer
2680
+
2681
+
2682
+ @njit(fastmath=True, cache=True)
2683
+ def drizzle_deposit_numba_footprint(
2684
+ img_data, # shape (H, W), mono
2685
+ transform, # shape (2, 3)
2686
+ drizzle_buffer, # shape (outH, outW)
2687
+ coverage_buffer,# shape (outH, outW)
2688
+ drizzle_factor: float,
2689
+ drop_shrink: float,
2690
+ frame_weight: float
2691
+ ):
2692
+ """
2693
+ Distributes each input pixel over a bounding box of width=drop_shrink
2694
+ in the drizzle (out) plane. (Mono 2D version)
2695
+ """
2696
+ h, w = img_data.shape
2697
+ out_h, out_w = drizzle_buffer.shape
2698
+
2699
+ # Build a 3×3 matrix M
2700
+ M = np.zeros((3, 3), dtype=np.float32)
2701
+ M[0, 0] = transform[0, 0] # a
2702
+ M[0, 1] = transform[0, 1] # b
2703
+ M[0, 2] = transform[0, 2] # tx
2704
+ M[1, 0] = transform[1, 0] # c
2705
+ M[1, 1] = transform[1, 1] # d
2706
+ M[1, 2] = transform[1, 2] # ty
2707
+ M[2, 2] = 1.0
2708
+
2709
+ in_coords = np.zeros(3, dtype=np.float32)
2710
+ in_coords[2] = 1.0
2711
+
2712
+ footprint_radius = drop_shrink * 0.5
2713
+
2714
+ for y in range(h):
2715
+ for x in range(w):
2716
+ val = img_data[y, x]
2717
+ if val == 0:
2718
+ continue
2719
+
2720
+ # Transform to output coords
2721
+ in_coords[0] = x
2722
+ in_coords[1] = y
2723
+ out_coords = M @ in_coords
2724
+ X = out_coords[0]
2725
+ Y = out_coords[1]
2726
+
2727
+ # Upsample
2728
+ Xo = X * drizzle_factor
2729
+ Yo = Y * drizzle_factor
2730
+
2731
+ # bounding box
2732
+ min_x = int(np.floor(Xo - footprint_radius))
2733
+ max_x = int(np.floor(Xo + footprint_radius))
2734
+ min_y = int(np.floor(Yo - footprint_radius))
2735
+ max_y = int(np.floor(Yo + footprint_radius))
2736
+
2737
+ # clip
2738
+ if max_x < 0 or min_x >= out_w or max_y < 0 or min_y >= out_h:
2739
+ continue
2740
+ if min_x < 0:
2741
+ min_x = 0
2742
+ if max_x >= out_w:
2743
+ max_x = out_w - 1
2744
+ if min_y < 0:
2745
+ min_y = 0
2746
+ if max_y >= out_h:
2747
+ max_y = out_h - 1
2748
+
2749
+ width_foot = (max_x - min_x + 1)
2750
+ height_foot = (max_y - min_y + 1)
2751
+ area_pixels = width_foot * height_foot
2752
+ if area_pixels <= 0:
2753
+ continue
2754
+
2755
+ deposit_val = (val * frame_weight) / area_pixels
2756
+ coverage_fraction = frame_weight / area_pixels
2757
+
2758
+ for oy in range(min_y, max_y+1):
2759
+ for ox in range(min_x, max_x+1):
2760
+ drizzle_buffer[oy, ox] += deposit_val
2761
+ coverage_buffer[oy, ox] += coverage_fraction
2762
+
2763
+ return drizzle_buffer, coverage_buffer
2764
+
2765
+
2766
+ @njit(parallel=True, cache=True)
2767
+ def finalize_drizzle_2d(drizzle_buffer, coverage_buffer, final_out):
2768
+ """
2769
+ parallel-friendly final step: final_out = drizzle_buffer / coverage_buffer,
2770
+ with coverage < 1e-8 => 0
2771
+ """
2772
+ out_h, out_w = drizzle_buffer.shape
2773
+ for y in prange(out_h):
2774
+ for x in range(out_w):
2775
+ cov = coverage_buffer[y, x]
2776
+ if cov < 1e-8:
2777
+ final_out[y, x] = 0.0
2778
+ else:
2779
+ final_out[y, x] = drizzle_buffer[y, x] / cov
2780
+ return final_out
2781
+
2782
+ @njit(fastmath=True, cache=True)
2783
+ def drizzle_deposit_color_naive(
2784
+ img_data, # shape (H,W,C)
2785
+ transform, # shape (2,3)
2786
+ drizzle_buffer, # shape (outH,outW,C)
2787
+ coverage_buffer, # shape (outH,outW,C)
2788
+ drizzle_factor: float,
2789
+ drop_shrink: float, # unused here
2790
+ frame_weight: float
2791
+ ):
2792
+ """
2793
+ Naive color deposit:
2794
+ Each input pixel is mapped to exactly one output pixel (ignores drop_shrink).
2795
+ """
2796
+ H, W, channels = img_data.shape
2797
+ outH, outW, outC = drizzle_buffer.shape
2798
+
2799
+ # Build 3×3 matrix M
2800
+ M = np.zeros((3, 3), dtype=np.float32)
2801
+ M[0, 0] = transform[0, 0]
2802
+ M[0, 1] = transform[0, 1]
2803
+ M[0, 2] = transform[0, 2]
2804
+ M[1, 0] = transform[1, 0]
2805
+ M[1, 1] = transform[1, 1]
2806
+ M[1, 2] = transform[1, 2]
2807
+ M[2, 2] = 1.0
2808
+
2809
+ in_coords = np.zeros(3, dtype=np.float32)
2810
+ in_coords[2] = 1.0
2811
+
2812
+ for y in range(H):
2813
+ for x in range(W):
2814
+ # 1) Transform
2815
+ in_coords[0] = x
2816
+ in_coords[1] = y
2817
+ out_coords = M @ in_coords
2818
+ X = out_coords[0]
2819
+ Y = out_coords[1]
2820
+
2821
+ # 2) Upsample
2822
+ Xo = int(X * drizzle_factor)
2823
+ Yo = int(Y * drizzle_factor)
2824
+
2825
+ # 3) Check bounds
2826
+ if 0 <= Xo < outW and 0 <= Yo < outH:
2827
+ # 4) For each channel
2828
+ for cidx in range(channels):
2829
+ val = img_data[y, x, cidx]
2830
+ if val != 0:
2831
+ drizzle_buffer[Yo, Xo, cidx] += val * frame_weight
2832
+ coverage_buffer[Yo, Xo, cidx] += frame_weight
2833
+
2834
+ return drizzle_buffer, coverage_buffer
2835
+ @njit(fastmath=True, cache=True)
+ def drizzle_deposit_color_footprint(
+     img_data,           # shape (H, W, C)
+     transform,          # shape (2, 3) affine transform
+     drizzle_buffer,     # shape (outH, outW, C)
+     coverage_buffer,    # shape (outH, outW, C)
+     drizzle_factor: float,
+     drop_shrink: float,
+     frame_weight: float
+ ):
+     """
+     Color version with a bounding-box footprint of width drop_shrink
+     for distributing flux in the output plane.
+     """
+     H, W, channels = img_data.shape
+     outH, outW, outC = drizzle_buffer.shape
+
+     # Build 3×3 homogeneous matrix
+     M = np.zeros((3, 3), dtype=np.float32)
+     M[0, 0] = transform[0, 0]
+     M[0, 1] = transform[0, 1]
+     M[0, 2] = transform[0, 2]
+     M[1, 0] = transform[1, 0]
+     M[1, 1] = transform[1, 1]
+     M[1, 2] = transform[1, 2]
+     M[2, 2] = 1.0
+
+     in_coords = np.zeros(3, dtype=np.float32)
+     in_coords[2] = 1.0
+
+     footprint_radius = drop_shrink * 0.5
+
+     for y in range(H):
+         for x in range(W):
+             # Transform once per pixel
+             in_coords[0] = x
+             in_coords[1] = y
+             out_coords = M @ in_coords
+             X = out_coords[0]
+             Y = out_coords[1]
+
+             # Upsample to the drizzle grid
+             Xo = X * drizzle_factor
+             Yo = Y * drizzle_factor
+
+             # Bounding box of the drop footprint
+             min_x = int(np.floor(Xo - footprint_radius))
+             max_x = int(np.floor(Xo + footprint_radius))
+             min_y = int(np.floor(Yo - footprint_radius))
+             max_y = int(np.floor(Yo + footprint_radius))
+
+             # Skip drops entirely outside the output; clamp the rest
+             if max_x < 0 or min_x >= outW or max_y < 0 or min_y >= outH:
+                 continue
+             if min_x < 0:
+                 min_x = 0
+             if max_x >= outW:
+                 max_x = outW - 1
+             if min_y < 0:
+                 min_y = 0
+             if max_y >= outH:
+                 max_y = outH - 1
+
+             width_foot = (max_x - min_x + 1)
+             height_foot = (max_y - min_y + 1)
+             area_pixels = width_foot * height_foot
+             if area_pixels <= 0:
+                 continue
+
+             for cidx in range(channels):
+                 val = img_data[y, x, cidx]
+                 if val == 0:
+                     continue
+
+                 # Spread flux and coverage evenly over the footprint
+                 deposit_val = (val * frame_weight) / area_pixels
+                 coverage_fraction = frame_weight / area_pixels
+
+                 for oy in range(min_y, max_y + 1):
+                     for ox in range(min_x, max_x + 1):
+                         drizzle_buffer[oy, ox, cidx] += deposit_val
+                         coverage_buffer[oy, ox, cidx] += coverage_fraction
+
+     return drizzle_buffer, coverage_buffer
+
+
+ @njit(cache=True)
+ def finalize_drizzle_3d(drizzle_buffer, coverage_buffer, final_out):
+     """
+     final_out[y, x, c] = drizzle_buffer[y, x, c] / coverage_buffer[y, x, c],
+     with pixels whose coverage is below 1e-8 set to 0.
+     """
+     outH, outW, channels = drizzle_buffer.shape
+     for y in range(outH):
+         for x in range(outW):
+             for cidx in range(channels):
+                 cov = coverage_buffer[y, x, cidx]
+                 if cov < 1e-8:
+                     final_out[y, x, cidx] = 0.0
+                 else:
+                     final_out[y, x, cidx] = drizzle_buffer[y, x, cidx] / cov
+     return final_out
+
+
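Putting the color kernels together, a sketch of a full drizzle pass over several registered frames (illustrative; the frame data, transforms, and drop size are synthetic assumptions). Note that the footprint kernel spreads flux uniformly over an integer bounding box rather than computing exact pixel-overlap areas, trading some accuracy for speed:

    import numpy as np

    factor, drop = 2.0, 0.65   # hypothetical drizzle scale and drop size
    rng = np.random.default_rng(0)
    frames = [rng.random((64, 64, 3), dtype=np.float32) for _ in range(3)]
    # small synthetic dithers: 2x3 affine transforms, frame -> reference
    transforms = [np.array([[1.0, 0.0, dx],
                            [0.0, 1.0, 0.0]], dtype=np.float32)
                  for dx in (0.0, 0.3, 0.6)]

    H, W, C = frames[0].shape
    outH, outW = int(H * factor), int(W * factor)
    driz = np.zeros((outH, outW, C), dtype=np.float32)
    cov  = np.zeros((outH, outW, C), dtype=np.float32)

    for img, M23 in zip(frames, transforms):
        drizzle_deposit_color_footprint(img, M23, driz, cov, factor, drop, 1.0)

    result = finalize_drizzle_3d(driz, cov, np.zeros_like(driz))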
+ @njit(cache=True)
+ def piecewise_linear(val, xvals, yvals):
+     """
+     Piecewise linear interpolation:
+     given a scalar 'val' and arrays xvals, yvals (each of length N), finds i
+     such that xvals[i] <= val < xvals[i+1] and returns the linear interpolation
+     between yvals[i] and yvals[i+1].
+     If val <= xvals[0], returns yvals[0].
+     If val >= xvals[-1], returns yvals[-1].
+     """
+     if val <= xvals[0]:
+         return yvals[0]
+     for i in range(len(xvals) - 1):
+         if val < xvals[i + 1]:
+             # Linear interpolation within [xvals[i], xvals[i+1]]
+             dx = xvals[i + 1] - xvals[i]
+             dy = yvals[i + 1] - yvals[i]
+             ratio = (val - xvals[i]) / dx
+             return yvals[i] + ratio * dy
+     return yvals[-1]
+
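A worked example of the interpolation (hypothetical control points): with xvals = [0.0, 0.5, 1.0] and yvals = [0.0, 0.8, 1.0], the value 0.25 falls in the first interval, so the result is 0.0 + (0.25 - 0.0) / 0.5 * (0.8 - 0.0) = 0.4:

    import numpy as np

    xv = np.array([0.0, 0.5, 1.0], dtype=np.float32)
    yv = np.array([0.0, 0.8, 1.0], dtype=np.float32)
    assert abs(piecewise_linear(0.25, xv, yv) - 0.4) < 1e-6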
+ @njit(parallel=True, fastmath=True, cache=True)
+ def apply_curves_numba(image, xvals, yvals):
+     """
+     Numba-accelerated routine that applies piecewise linear interpolation
+     to each pixel in 'image'.
+     - image can be (H, W) or (H, W, C).
+     - xvals, yvals are the curve control arrays in ascending order.
+     Returns the adjusted image as float32.
+     """
+     if image.ndim == 2:
+         H, W = image.shape
+         out = np.empty((H, W), dtype=np.float32)
+         for y in prange(H):
+             for x in range(W):
+                 val = image[y, x]
+                 out[y, x] = piecewise_linear(val, xvals, yvals)
+         return out
+     elif image.ndim == 3:
+         H, W, C = image.shape
+         out = np.empty((H, W, C), dtype=np.float32)
+         for y in prange(H):
+             for x in range(W):
+                 for c in range(C):
+                     val = image[y, x, c]
+                     out[y, x, c] = piecewise_linear(val, xvals, yvals)
+         return out
+     else:
+         # Unexpected shape: return the input unchanged
+         return image
+
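A sketch of applying a curve with the routine above (illustrative; the control points and image are assumptions, and the application builds its own curve arrays):

    import numpy as np

    # gentle brightening curve for data in the [0, 1] range
    xv = np.array([0.0, 0.25, 0.75, 1.0], dtype=np.float32)
    yv = np.array([0.0, 0.35, 0.85, 1.0], dtype=np.float32)

    img = np.random.default_rng(0).random((256, 256, 3), dtype=np.float32)
    adjusted = apply_curves_numba(img, xv, yv)   # same shape, float32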
+ def fast_star_detect(image,
+                      blur_size=9,
+                      threshold_factor=0.7,
+                      min_area=1,
+                      max_area=5000):
+     """
+     Finds star positions via contour detection + ellipse fitting.
+     Returns an Nx2 array of (x, y) star coordinates in the same coordinate
+     system as 'image'.
+     """
+     # 1) Convert to grayscale if needed
+     if image.ndim == 3:
+         image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+
+     # 2) Normalize to 8-bit [0..255]
+     img_min, img_max = image.min(), image.max()
+     if img_max <= img_min:
+         return np.empty((0, 2), dtype=np.float32)  # all pixels equal => no stars
+     image_8u = (255.0 * (image - img_min) / (img_max - img_min)).astype(np.uint8)
+
+     # 3) Blur, then subtract to highlight small, bright features (stars)
+     blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
+     subtracted = cv2.absdiff(image_8u, blurred)
+
+     # 4) Otsu's threshold, scaled by threshold_factor
+     otsu_thresh, _ = cv2.threshold(subtracted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
+     final_thresh_val = max(2, int(otsu_thresh * threshold_factor))
+     _, thresh = cv2.threshold(subtracted, final_thresh_val, 255, cv2.THRESH_BINARY)
+
+     # 5) (Optional) morphological opening to remove single-pixel noise
+     kernel = np.ones((2, 2), np.uint8)
+     thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
+
+     # 6) Find contours
+     contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+     # 7) Filter by area, fit an ellipse, and use its center as the star position
+     star_positions = []
+     for c in contours:
+         area = cv2.contourArea(c)
+         if area < min_area or area > max_area:
+             continue
+         if len(c) < 5:
+             # cv2.fitEllipse needs at least 5 contour points
+             continue
+
+         ellipse = cv2.fitEllipse(c)
+         (cx, cy), (major_axis, minor_axis), angle = ellipse
+         # Eccentricity or axis-ratio checks could be added here to reject odd shapes
+         star_positions.append((cx, cy))
+
+     if len(star_positions) == 0:
+         return np.empty((0, 2), dtype=np.float32)
+     return np.array(star_positions, dtype=np.float32)
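A usage sketch for the detector (illustrative; the synthetic input stands in for a real, normalized frame, and the tuning values are assumptions):

    import numpy as np

    frame = np.random.default_rng(0).random((512, 512), dtype=np.float32)  # stand-in for real data
    stars = fast_star_detect(frame, blur_size=9, threshold_factor=0.7,
                             min_area=2, max_area=2000)
    print(f"detected {len(stars)} star-like contours")  # Nx2 array of (x, y) centers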