setiastrosuitepro-1.6.2.post1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of setiastrosuitepro might be problematic.
- setiastro/__init__.py +2 -0
- setiastro/data/SASP_data.fits +0 -0
- setiastro/data/catalogs/List_of_Galaxies_with_Distances_Gly.csv +488 -0
- setiastro/data/catalogs/astrobin_filters.csv +890 -0
- setiastro/data/catalogs/astrobin_filters_page1_local.csv +51 -0
- setiastro/data/catalogs/cali2.csv +63 -0
- setiastro/data/catalogs/cali2color.csv +65 -0
- setiastro/data/catalogs/celestial_catalog - original.csv +16471 -0
- setiastro/data/catalogs/celestial_catalog.csv +24031 -0
- setiastro/data/catalogs/detected_stars.csv +24784 -0
- setiastro/data/catalogs/fits_header_data.csv +46 -0
- setiastro/data/catalogs/test.csv +8 -0
- setiastro/data/catalogs/updated_celestial_catalog.csv +16471 -0
- setiastro/images/Astro_Spikes.png +0 -0
- setiastro/images/Background_startup.jpg +0 -0
- setiastro/images/HRDiagram.png +0 -0
- setiastro/images/LExtract.png +0 -0
- setiastro/images/LInsert.png +0 -0
- setiastro/images/Oxygenation-atm-2.svg.png +0 -0
- setiastro/images/RGB080604.png +0 -0
- setiastro/images/abeicon.png +0 -0
- setiastro/images/aberration.png +0 -0
- setiastro/images/andromedatry.png +0 -0
- setiastro/images/andromedatry_satellited.png +0 -0
- setiastro/images/annotated.png +0 -0
- setiastro/images/aperture.png +0 -0
- setiastro/images/astrosuite.ico +0 -0
- setiastro/images/astrosuite.png +0 -0
- setiastro/images/astrosuitepro.icns +0 -0
- setiastro/images/astrosuitepro.ico +0 -0
- setiastro/images/astrosuitepro.png +0 -0
- setiastro/images/background.png +0 -0
- setiastro/images/background2.png +0 -0
- setiastro/images/benchmark.png +0 -0
- setiastro/images/big_moon_stabilizer_timeline.png +0 -0
- setiastro/images/big_moon_stabilizer_timeline_clean.png +0 -0
- setiastro/images/blaster.png +0 -0
- setiastro/images/blink.png +0 -0
- setiastro/images/clahe.png +0 -0
- setiastro/images/collage.png +0 -0
- setiastro/images/colorwheel.png +0 -0
- setiastro/images/contsub.png +0 -0
- setiastro/images/convo.png +0 -0
- setiastro/images/copyslot.png +0 -0
- setiastro/images/cosmic.png +0 -0
- setiastro/images/cosmicsat.png +0 -0
- setiastro/images/crop1.png +0 -0
- setiastro/images/cropicon.png +0 -0
- setiastro/images/curves.png +0 -0
- setiastro/images/cvs.png +0 -0
- setiastro/images/debayer.png +0 -0
- setiastro/images/denoise_cnn_custom.png +0 -0
- setiastro/images/denoise_cnn_graph.png +0 -0
- setiastro/images/disk.png +0 -0
- setiastro/images/dse.png +0 -0
- setiastro/images/exoicon.png +0 -0
- setiastro/images/eye.png +0 -0
- setiastro/images/fliphorizontal.png +0 -0
- setiastro/images/flipvertical.png +0 -0
- setiastro/images/font.png +0 -0
- setiastro/images/freqsep.png +0 -0
- setiastro/images/functionbundle.png +0 -0
- setiastro/images/graxpert.png +0 -0
- setiastro/images/green.png +0 -0
- setiastro/images/gridicon.png +0 -0
- setiastro/images/halo.png +0 -0
- setiastro/images/hdr.png +0 -0
- setiastro/images/histogram.png +0 -0
- setiastro/images/hubble.png +0 -0
- setiastro/images/imagecombine.png +0 -0
- setiastro/images/invert.png +0 -0
- setiastro/images/isophote.png +0 -0
- setiastro/images/isophote_demo_figure.png +0 -0
- setiastro/images/isophote_demo_image.png +0 -0
- setiastro/images/isophote_demo_model.png +0 -0
- setiastro/images/isophote_demo_residual.png +0 -0
- setiastro/images/jwstpupil.png +0 -0
- setiastro/images/linearfit.png +0 -0
- setiastro/images/livestacking.png +0 -0
- setiastro/images/mask.png +0 -0
- setiastro/images/maskapply.png +0 -0
- setiastro/images/maskcreate.png +0 -0
- setiastro/images/maskremove.png +0 -0
- setiastro/images/morpho.png +0 -0
- setiastro/images/mosaic.png +0 -0
- setiastro/images/multiscale_decomp.png +0 -0
- setiastro/images/nbtorgb.png +0 -0
- setiastro/images/neutral.png +0 -0
- setiastro/images/nuke.png +0 -0
- setiastro/images/openfile.png +0 -0
- setiastro/images/pedestal.png +0 -0
- setiastro/images/pen.png +0 -0
- setiastro/images/pixelmath.png +0 -0
- setiastro/images/platesolve.png +0 -0
- setiastro/images/ppp.png +0 -0
- setiastro/images/pro.png +0 -0
- setiastro/images/project.png +0 -0
- setiastro/images/psf.png +0 -0
- setiastro/images/redo.png +0 -0
- setiastro/images/redoicon.png +0 -0
- setiastro/images/rescale.png +0 -0
- setiastro/images/rgbalign.png +0 -0
- setiastro/images/rgbcombo.png +0 -0
- setiastro/images/rgbextract.png +0 -0
- setiastro/images/rotate180.png +0 -0
- setiastro/images/rotateclockwise.png +0 -0
- setiastro/images/rotatecounterclockwise.png +0 -0
- setiastro/images/satellite.png +0 -0
- setiastro/images/script.png +0 -0
- setiastro/images/selectivecolor.png +0 -0
- setiastro/images/simbad.png +0 -0
- setiastro/images/slot0.png +0 -0
- setiastro/images/slot1.png +0 -0
- setiastro/images/slot2.png +0 -0
- setiastro/images/slot3.png +0 -0
- setiastro/images/slot4.png +0 -0
- setiastro/images/slot5.png +0 -0
- setiastro/images/slot6.png +0 -0
- setiastro/images/slot7.png +0 -0
- setiastro/images/slot8.png +0 -0
- setiastro/images/slot9.png +0 -0
- setiastro/images/spcc.png +0 -0
- setiastro/images/spin_precession_vs_lunar_distance.png +0 -0
- setiastro/images/spinner.gif +0 -0
- setiastro/images/stacking.png +0 -0
- setiastro/images/staradd.png +0 -0
- setiastro/images/staralign.png +0 -0
- setiastro/images/starnet.png +0 -0
- setiastro/images/starregistration.png +0 -0
- setiastro/images/starspike.png +0 -0
- setiastro/images/starstretch.png +0 -0
- setiastro/images/statstretch.png +0 -0
- setiastro/images/supernova.png +0 -0
- setiastro/images/uhs.png +0 -0
- setiastro/images/undoicon.png +0 -0
- setiastro/images/upscale.png +0 -0
- setiastro/images/viewbundle.png +0 -0
- setiastro/images/whitebalance.png +0 -0
- setiastro/images/wimi_icon_256x256.png +0 -0
- setiastro/images/wimilogo.png +0 -0
- setiastro/images/wims.png +0 -0
- setiastro/images/wrench_icon.png +0 -0
- setiastro/images/xisfliberator.png +0 -0
- setiastro/qml/ResourceMonitor.qml +126 -0
- setiastro/saspro/__init__.py +20 -0
- setiastro/saspro/__main__.py +945 -0
- setiastro/saspro/_generated/__init__.py +7 -0
- setiastro/saspro/_generated/build_info.py +3 -0
- setiastro/saspro/abe.py +1346 -0
- setiastro/saspro/abe_preset.py +196 -0
- setiastro/saspro/aberration_ai.py +694 -0
- setiastro/saspro/aberration_ai_preset.py +224 -0
- setiastro/saspro/accel_installer.py +218 -0
- setiastro/saspro/accel_workers.py +30 -0
- setiastro/saspro/add_stars.py +624 -0
- setiastro/saspro/astrobin_exporter.py +1010 -0
- setiastro/saspro/astrospike.py +153 -0
- setiastro/saspro/astrospike_python.py +1841 -0
- setiastro/saspro/autostretch.py +198 -0
- setiastro/saspro/backgroundneutral.py +602 -0
- setiastro/saspro/batch_convert.py +328 -0
- setiastro/saspro/batch_renamer.py +522 -0
- setiastro/saspro/blemish_blaster.py +491 -0
- setiastro/saspro/blink_comparator_pro.py +2926 -0
- setiastro/saspro/bundles.py +61 -0
- setiastro/saspro/bundles_dock.py +114 -0
- setiastro/saspro/cheat_sheet.py +213 -0
- setiastro/saspro/clahe.py +368 -0
- setiastro/saspro/comet_stacking.py +1442 -0
- setiastro/saspro/common_tr.py +107 -0
- setiastro/saspro/config.py +38 -0
- setiastro/saspro/config_bootstrap.py +40 -0
- setiastro/saspro/config_manager.py +316 -0
- setiastro/saspro/continuum_subtract.py +1617 -0
- setiastro/saspro/convo.py +1400 -0
- setiastro/saspro/convo_preset.py +414 -0
- setiastro/saspro/copyastro.py +190 -0
- setiastro/saspro/cosmicclarity.py +1589 -0
- setiastro/saspro/cosmicclarity_preset.py +407 -0
- setiastro/saspro/crop_dialog_pro.py +973 -0
- setiastro/saspro/crop_preset.py +189 -0
- setiastro/saspro/curve_editor_pro.py +2562 -0
- setiastro/saspro/curves_preset.py +375 -0
- setiastro/saspro/debayer.py +673 -0
- setiastro/saspro/debug_utils.py +29 -0
- setiastro/saspro/dnd_mime.py +35 -0
- setiastro/saspro/doc_manager.py +2664 -0
- setiastro/saspro/exoplanet_detector.py +2166 -0
- setiastro/saspro/file_utils.py +284 -0
- setiastro/saspro/fitsmodifier.py +748 -0
- setiastro/saspro/fix_bom.py +32 -0
- setiastro/saspro/free_torch_memory.py +48 -0
- setiastro/saspro/frequency_separation.py +1349 -0
- setiastro/saspro/function_bundle.py +1596 -0
- setiastro/saspro/generate_translations.py +3092 -0
- setiastro/saspro/ghs_dialog_pro.py +663 -0
- setiastro/saspro/ghs_preset.py +284 -0
- setiastro/saspro/graxpert.py +637 -0
- setiastro/saspro/graxpert_preset.py +287 -0
- setiastro/saspro/gui/__init__.py +0 -0
- setiastro/saspro/gui/main_window.py +8810 -0
- setiastro/saspro/gui/mixins/__init__.py +33 -0
- setiastro/saspro/gui/mixins/dock_mixin.py +362 -0
- setiastro/saspro/gui/mixins/file_mixin.py +450 -0
- setiastro/saspro/gui/mixins/geometry_mixin.py +403 -0
- setiastro/saspro/gui/mixins/header_mixin.py +441 -0
- setiastro/saspro/gui/mixins/mask_mixin.py +421 -0
- setiastro/saspro/gui/mixins/menu_mixin.py +389 -0
- setiastro/saspro/gui/mixins/theme_mixin.py +367 -0
- setiastro/saspro/gui/mixins/toolbar_mixin.py +1457 -0
- setiastro/saspro/gui/mixins/update_mixin.py +309 -0
- setiastro/saspro/gui/mixins/view_mixin.py +435 -0
- setiastro/saspro/gui/statistics_dialog.py +47 -0
- setiastro/saspro/halobgon.py +488 -0
- setiastro/saspro/header_viewer.py +448 -0
- setiastro/saspro/headless_utils.py +88 -0
- setiastro/saspro/histogram.py +756 -0
- setiastro/saspro/history_explorer.py +941 -0
- setiastro/saspro/i18n.py +168 -0
- setiastro/saspro/image_combine.py +417 -0
- setiastro/saspro/image_peeker_pro.py +1604 -0
- setiastro/saspro/imageops/__init__.py +37 -0
- setiastro/saspro/imageops/mdi_snap.py +292 -0
- setiastro/saspro/imageops/scnr.py +36 -0
- setiastro/saspro/imageops/starbasedwhitebalance.py +210 -0
- setiastro/saspro/imageops/stretch.py +236 -0
- setiastro/saspro/isophote.py +1182 -0
- setiastro/saspro/layers.py +208 -0
- setiastro/saspro/layers_dock.py +714 -0
- setiastro/saspro/lazy_imports.py +193 -0
- setiastro/saspro/legacy/__init__.py +2 -0
- setiastro/saspro/legacy/image_manager.py +2226 -0
- setiastro/saspro/legacy/numba_utils.py +3676 -0
- setiastro/saspro/legacy/xisf.py +1071 -0
- setiastro/saspro/linear_fit.py +537 -0
- setiastro/saspro/live_stacking.py +1841 -0
- setiastro/saspro/log_bus.py +5 -0
- setiastro/saspro/logging_config.py +460 -0
- setiastro/saspro/luminancerecombine.py +309 -0
- setiastro/saspro/main_helpers.py +201 -0
- setiastro/saspro/mask_creation.py +931 -0
- setiastro/saspro/masks_core.py +56 -0
- setiastro/saspro/mdi_widgets.py +353 -0
- setiastro/saspro/memory_utils.py +666 -0
- setiastro/saspro/metadata_patcher.py +75 -0
- setiastro/saspro/mfdeconv.py +3831 -0
- setiastro/saspro/mfdeconv_earlystop.py +71 -0
- setiastro/saspro/mfdeconvcudnn.py +3263 -0
- setiastro/saspro/mfdeconvsport.py +2382 -0
- setiastro/saspro/minorbodycatalog.py +567 -0
- setiastro/saspro/morphology.py +407 -0
- setiastro/saspro/multiscale_decomp.py +1293 -0
- setiastro/saspro/nbtorgb_stars.py +541 -0
- setiastro/saspro/numba_utils.py +3145 -0
- setiastro/saspro/numba_warmup.py +141 -0
- setiastro/saspro/ops/__init__.py +9 -0
- setiastro/saspro/ops/command_help_dialog.py +623 -0
- setiastro/saspro/ops/command_runner.py +217 -0
- setiastro/saspro/ops/commands.py +1594 -0
- setiastro/saspro/ops/script_editor.py +1102 -0
- setiastro/saspro/ops/scripts.py +1473 -0
- setiastro/saspro/ops/settings.py +637 -0
- setiastro/saspro/parallel_utils.py +554 -0
- setiastro/saspro/pedestal.py +121 -0
- setiastro/saspro/perfect_palette_picker.py +1071 -0
- setiastro/saspro/pipeline.py +110 -0
- setiastro/saspro/pixelmath.py +1604 -0
- setiastro/saspro/plate_solver.py +2445 -0
- setiastro/saspro/project_io.py +797 -0
- setiastro/saspro/psf_utils.py +136 -0
- setiastro/saspro/psf_viewer.py +549 -0
- setiastro/saspro/pyi_rthook_astroquery.py +95 -0
- setiastro/saspro/remove_green.py +331 -0
- setiastro/saspro/remove_stars.py +1599 -0
- setiastro/saspro/remove_stars_preset.py +404 -0
- setiastro/saspro/resources.py +501 -0
- setiastro/saspro/rgb_combination.py +208 -0
- setiastro/saspro/rgb_extract.py +19 -0
- setiastro/saspro/rgbalign.py +723 -0
- setiastro/saspro/runtime_imports.py +7 -0
- setiastro/saspro/runtime_torch.py +754 -0
- setiastro/saspro/save_options.py +73 -0
- setiastro/saspro/selective_color.py +1552 -0
- setiastro/saspro/sfcc.py +1472 -0
- setiastro/saspro/shortcuts.py +3043 -0
- setiastro/saspro/signature_insert.py +1102 -0
- setiastro/saspro/stacking_suite.py +18470 -0
- setiastro/saspro/star_alignment.py +7435 -0
- setiastro/saspro/star_alignment_preset.py +329 -0
- setiastro/saspro/star_metrics.py +49 -0
- setiastro/saspro/star_spikes.py +765 -0
- setiastro/saspro/star_stretch.py +507 -0
- setiastro/saspro/stat_stretch.py +538 -0
- setiastro/saspro/status_log_dock.py +78 -0
- setiastro/saspro/subwindow.py +3328 -0
- setiastro/saspro/supernovaasteroidhunter.py +1719 -0
- setiastro/saspro/swap_manager.py +99 -0
- setiastro/saspro/torch_backend.py +89 -0
- setiastro/saspro/torch_rejection.py +434 -0
- setiastro/saspro/translations/all_source_strings.json +3654 -0
- setiastro/saspro/translations/ar_translations.py +3865 -0
- setiastro/saspro/translations/de_translations.py +3749 -0
- setiastro/saspro/translations/es_translations.py +3939 -0
- setiastro/saspro/translations/fr_translations.py +3858 -0
- setiastro/saspro/translations/hi_translations.py +3571 -0
- setiastro/saspro/translations/integrate_translations.py +270 -0
- setiastro/saspro/translations/it_translations.py +3678 -0
- setiastro/saspro/translations/ja_translations.py +3601 -0
- setiastro/saspro/translations/pt_translations.py +3869 -0
- setiastro/saspro/translations/ru_translations.py +2848 -0
- setiastro/saspro/translations/saspro_ar.qm +0 -0
- setiastro/saspro/translations/saspro_ar.ts +255 -0
- setiastro/saspro/translations/saspro_de.qm +0 -0
- setiastro/saspro/translations/saspro_de.ts +253 -0
- setiastro/saspro/translations/saspro_es.qm +0 -0
- setiastro/saspro/translations/saspro_es.ts +12520 -0
- setiastro/saspro/translations/saspro_fr.qm +0 -0
- setiastro/saspro/translations/saspro_fr.ts +12514 -0
- setiastro/saspro/translations/saspro_hi.qm +0 -0
- setiastro/saspro/translations/saspro_hi.ts +257 -0
- setiastro/saspro/translations/saspro_it.qm +0 -0
- setiastro/saspro/translations/saspro_it.ts +12520 -0
- setiastro/saspro/translations/saspro_ja.qm +0 -0
- setiastro/saspro/translations/saspro_ja.ts +257 -0
- setiastro/saspro/translations/saspro_pt.qm +0 -0
- setiastro/saspro/translations/saspro_pt.ts +257 -0
- setiastro/saspro/translations/saspro_ru.qm +0 -0
- setiastro/saspro/translations/saspro_ru.ts +237 -0
- setiastro/saspro/translations/saspro_sw.qm +0 -0
- setiastro/saspro/translations/saspro_sw.ts +257 -0
- setiastro/saspro/translations/saspro_uk.qm +0 -0
- setiastro/saspro/translations/saspro_uk.ts +10771 -0
- setiastro/saspro/translations/saspro_zh.qm +0 -0
- setiastro/saspro/translations/saspro_zh.ts +12520 -0
- setiastro/saspro/translations/sw_translations.py +3671 -0
- setiastro/saspro/translations/uk_translations.py +3700 -0
- setiastro/saspro/translations/zh_translations.py +3675 -0
- setiastro/saspro/versioning.py +77 -0
- setiastro/saspro/view_bundle.py +1558 -0
- setiastro/saspro/wavescale_hdr.py +645 -0
- setiastro/saspro/wavescale_hdr_preset.py +101 -0
- setiastro/saspro/wavescalede.py +680 -0
- setiastro/saspro/wavescalede_preset.py +230 -0
- setiastro/saspro/wcs_update.py +374 -0
- setiastro/saspro/whitebalance.py +492 -0
- setiastro/saspro/widgets/__init__.py +48 -0
- setiastro/saspro/widgets/common_utilities.py +306 -0
- setiastro/saspro/widgets/graphics_views.py +122 -0
- setiastro/saspro/widgets/image_utils.py +518 -0
- setiastro/saspro/widgets/minigame/game.js +986 -0
- setiastro/saspro/widgets/minigame/index.html +53 -0
- setiastro/saspro/widgets/minigame/style.css +241 -0
- setiastro/saspro/widgets/preview_dialogs.py +280 -0
- setiastro/saspro/widgets/resource_monitor.py +237 -0
- setiastro/saspro/widgets/spinboxes.py +275 -0
- setiastro/saspro/widgets/themed_buttons.py +13 -0
- setiastro/saspro/widgets/wavelet_utils.py +331 -0
- setiastro/saspro/wimi.py +7996 -0
- setiastro/saspro/wims.py +578 -0
- setiastro/saspro/window_shelf.py +185 -0
- setiastro/saspro/xisf.py +1123 -0
- setiastrosuitepro-1.6.2.post1.dist-info/METADATA +278 -0
- setiastrosuitepro-1.6.2.post1.dist-info/RECORD +367 -0
- setiastrosuitepro-1.6.2.post1.dist-info/WHEEL +4 -0
- setiastrosuitepro-1.6.2.post1.dist-info/entry_points.txt +6 -0
- setiastrosuitepro-1.6.2.post1.dist-info/licenses/LICENSE +674 -0
- setiastrosuitepro-1.6.2.post1.dist-info/licenses/license.txt +2580 -0
setiastro/saspro/numba_utils.py
@@ -0,0 +1,3145 @@
import numpy as np
from numba import njit, prange
import cv2
import math

@njit(parallel=True, fastmath=True, cache=True)
def blend_add_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                v = A[y,x,c] + B[y,x,c] * alpha
                # clamp 0..1
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def blend_subtract_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                v = A[y,x,c] - B[y,x,c] * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def blend_multiply_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                v = (A[y,x,c] * (1-alpha)) + (A[y,x,c] * B[y,x,c] * alpha)
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def blend_divide_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    eps = 1e-6
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                # avoid division by zero
                b = A[y,x,c] / (B[y,x,c] + eps)
                # clamp f(A,B)
                if b < 0.0: b = 0.0
                elif b > 1.0: b = 1.0
                # mix with original
                v = A[y,x,c] * (1.0 - alpha) + b * alpha
                # clamp final
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def blend_screen_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                # Screen: 1 - (1-A)*(1-B)
                b = 1.0 - (1.0 - A[y,x,c]) * (1.0 - B[y,x,c])
                if b < 0.0: b = 0.0
                elif b > 1.0: b = 1.0
                v = A[y,x,c] * (1.0 - alpha) + b * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def blend_overlay_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                a = A[y,x,c]
                b_in = B[y,x,c]
                # Overlay: if a < .5: 2*a*b, else: 1 - 2*(1-a)*(1-b)
                if a <= 0.5:
                    b = 2.0 * a * b_in
                else:
                    b = 1.0 - 2.0 * (1.0 - a) * (1.0 - b_in)
                if b < 0.0: b = 0.0
                elif b > 1.0: b = 1.0
                v = a * (1.0 - alpha) + b * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def blend_difference_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                # Difference: |A - B|
                b = A[y,x,c] - B[y,x,c]
                if b < 0.0: b = -b
                # clamp f(A,B) is redundant since abs() already >=0; we cap above 1
                if b > 1.0: b = 1.0
                v = A[y,x,c] * (1.0 - alpha) + b * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out
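# --- Editor's usage sketch (not part of the original diff) ---
# The blend kernels above expect float32 (H, W, C) arrays normalized to [0, 1]
# and an alpha in [0, 1]; the array names and sizes below are illustrative only.
def _demo_blend_modes():
    rng = np.random.default_rng(0)
    base = rng.random((64, 64, 3)).astype(np.float32)
    layer = rng.random((64, 64, 3)).astype(np.float32)
    screened = blend_screen_numba(base, layer, 0.5)   # 50% screen blend
    overlaid = blend_overlay_numba(base, layer, 1.0)  # full-strength overlay
    return screened, overlaid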

@njit(parallel=True, fastmath=True, cache=True)
def rescale_image_numba(image, factor):
    """
    Custom rescale function using bilinear interpolation optimized with numba.
    Supports both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        new_width = int(width * factor)
        new_height = int(height * factor)
        output = np.zeros((new_height, new_width), dtype=np.float32)
        for y in prange(new_height):
            for x in prange(new_width):
                src_x = x / factor
                src_y = y / factor
                x0, y0 = int(src_x), int(src_y)
                x1 = x0 + 1 if x0 + 1 < width else width - 1
                y1 = y0 + 1 if y0 + 1 < height else height - 1
                dx = src_x - x0
                dy = src_y - y0
                output[y, x] = (image[y0, x0] * (1 - dx) * (1 - dy) +
                                image[y0, x1] * dx * (1 - dy) +
                                image[y1, x0] * (1 - dx) * dy +
                                image[y1, x1] * dx * dy)
        return output
    else:
        height, width, channels = image.shape
        new_width = int(width * factor)
        new_height = int(height * factor)
        output = np.zeros((new_height, new_width, channels), dtype=np.float32)
        for y in prange(new_height):
            for x in prange(new_width):
                src_x = x / factor
                src_y = y / factor
                x0, y0 = int(src_x), int(src_y)
                x1 = x0 + 1 if x0 + 1 < width else width - 1
                y1 = y0 + 1 if y0 + 1 < height else height - 1
                dx = src_x - x0
                dy = src_y - y0
                for c in range(channels):
                    output[y, x, c] = (image[y0, x0, c] * (1 - dx) * (1 - dy) +
                                       image[y0, x1, c] * dx * (1 - dy) +
                                       image[y1, x0, c] * (1 - dx) * dy +
                                       image[y1, x1, c] * dx * dy)
        return output

@njit(parallel=True, fastmath=True, cache=True)
def bin2x2_numba(image):
    """
    Downsample the image by 2×2 via simple averaging (“integer binning”).
    Works on 2D (H×W) or 3D (H×W×C) arrays. If dimensions aren’t even,
    the last row/column is dropped.
    """
    h, w = image.shape[:2]
    h2 = h // 2
    w2 = w // 2

    # allocate output
    if image.ndim == 2:
        out = np.empty((h2, w2), dtype=np.float32)
        for i in prange(h2):
            for j in prange(w2):
                # average 2x2 block
                s = image[2*i  , 2*j  ] \
                  + image[2*i+1, 2*j  ] \
                  + image[2*i  , 2*j+1] \
                  + image[2*i+1, 2*j+1]
                out[i, j] = s * 0.25
    else:
        c = image.shape[2]
        out = np.empty((h2, w2, c), dtype=np.float32)
        for i in prange(h2):
            for j in prange(w2):
                for k in range(c):
                    s = image[2*i  , 2*j  , k] \
                      + image[2*i+1, 2*j  , k] \
                      + image[2*i  , 2*j+1, k] \
                      + image[2*i+1, 2*j+1, k]
                    out[i, j, k] = s * 0.25

    return out
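# --- Editor's usage sketch (not part of the original diff) ---
# Both resampling helpers take float images; rescale_image_numba uses a scale
# factor (e.g. 2.0 doubles each dimension via bilinear interpolation), while
# bin2x2_numba averages each 2x2 block. The array built here is illustrative.
def _demo_resample():
    mono = np.linspace(0.0, 1.0, 256 * 256, dtype=np.float32).reshape(256, 256)
    upscaled = rescale_image_numba(mono, 2.0)   # (512, 512)
    binned = bin2x2_numba(mono)                 # (128, 128)
    return upscaled.shape, binned.shape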

@njit(parallel=True, fastmath=True, cache=True)
def flip_horizontal_numba(image):
    """
    Flips an image horizontally using Numba JIT.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = image[y, width - x - 1]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = image[y, width - x - 1, c]
        return output


@njit(parallel=True, fastmath=True, cache=True)
def flip_vertical_numba(image):
    """
    Flips an image vertically using Numba JIT.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = image[height - y - 1, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = image[height - y - 1, x, c]
        return output


@njit(parallel=True, fastmath=True, cache=True)
def rotate_90_clockwise_numba(image):
    """
    Rotates the image 90 degrees clockwise.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((width, height), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[x, height - 1 - y] = image[y, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((width, height, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[x, height - 1 - y, c] = image[y, x, c]
        return output


@njit(parallel=True, fastmath=True, cache=True)
def rotate_90_counterclockwise_numba(image):
    """
    Rotates the image 90 degrees counterclockwise.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((width, height), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[width - 1 - x, y] = image[y, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((width, height, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[width - 1 - x, y, c] = image[y, x, c]
        return output


@njit(parallel=True, fastmath=True, cache=True)
def invert_image_numba(image):
    """
    Inverts an image (1 - pixel value) using Numba JIT.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = 1.0 - image[y, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = 1.0 - image[y, x, c]
        return output
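# --- Editor's usage sketch (not part of the original diff) ---
# The geometry kernels all return new arrays and preserve dtype; a rotation
# followed by the opposite rotation round-trips the image. Shapes are illustrative.
def _demo_geometry():
    frame = np.zeros((100, 200), dtype=np.float32)
    rotated = rotate_90_clockwise_numba(frame)            # shape (200, 100)
    restored = rotate_90_counterclockwise_numba(rotated)  # back to (100, 200)
    mirrored = flip_horizontal_numba(invert_image_numba(frame))
    return rotated.shape, restored.shape, mirrored.shape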



@njit(parallel=True, fastmath=True, cache=True)
def apply_flat_division_numba_2d(image, master_flat, master_bias=None):
    """
    Mono version: image.shape == (H,W)
    """
    if master_bias is not None:
        master_flat = master_flat - master_bias
        image = image - master_bias

    median_flat = np.mean(master_flat)
    height, width = image.shape

    for y in prange(height):
        for x in range(width):
            image[y, x] /= (master_flat[y, x] / median_flat)

    return image


@njit(parallel=True, fastmath=True, cache=True)
def apply_flat_division_numba_3d(image, master_flat, master_bias=None):
    """
    Color version: image.shape == (H,W,C)
    """
    if master_bias is not None:
        master_flat = master_flat - master_bias
        image = image - master_bias

    median_flat = np.mean(master_flat)
    height, width, channels = image.shape

    for y in prange(height):
        for x in range(width):
            for c in range(channels):
                image[y, x, c] /= (master_flat[y, x, c] / median_flat)

    return image

def apply_flat_division_numba(image, master_flat, master_bias=None):
    """
    Dispatcher that calls the correct Numba function
    depending on whether 'image' is 2D or 3D.
    """
    if image.ndim == 2:
        # Mono
        return apply_flat_division_numba_2d(image, master_flat, master_bias)
    elif image.ndim == 3:
        # Color
        return apply_flat_division_numba_3d(image, master_flat, master_bias)
    else:
        raise ValueError(f"apply_flat_division_numba: expected 2D or 3D, got shape {image.shape}")
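# --- Editor's usage sketch (not part of the original diff) ---
# apply_flat_division_numba divides each pixel by the (bias-corrected) flat,
# normalized by the flat's mean, and may modify `image` in place when no bias
# is given, so pass copies if the originals must be preserved. Values below are
# illustrative only.
def _demo_flat_division():
    rng = np.random.default_rng(1)
    light = rng.random((128, 128)).astype(np.float32) + 0.1
    flat = rng.random((128, 128)).astype(np.float32) + 0.5
    bias = np.full((128, 128), 0.05, dtype=np.float32)
    calibrated = apply_flat_division_numba(light.copy(), flat.copy(), bias)
    return calibrated.shape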


@njit(parallel=True, cache=True)
def subtract_dark_3d(frames, dark_frame):
    """
    For mono stack:
      frames.shape == (F,H,W)
      dark_frame.shape == (H,W)
    Returns the same shape (F,H,W).
    """
    num_frames, height, width = frames.shape
    result = np.empty_like(frames, dtype=np.float32)

    for i in prange(num_frames):
        # Subtract the dark frame from each 2D slice
        result[i] = frames[i] - dark_frame

    return result


@njit(parallel=True, cache=True)
def subtract_dark_4d(frames, dark_frame):
    """
    For color stack:
      frames.shape == (F,H,W,C)
      dark_frame.shape == (H,W,C)
    Returns the same shape (F,H,W,C).
    """
    num_frames, height, width, channels = frames.shape
    result = np.empty_like(frames, dtype=np.float32)

    for i in prange(num_frames):
        for y in range(height):
            for x in range(width):
                for c in range(channels):
                    result[i, y, x, c] = frames[i, y, x, c] - dark_frame[y, x, c]

    return result

def subtract_dark(frames, dark_frame):
    """
    Dispatcher function that calls the correct Numba function
    depending on whether 'frames' is 3D or 4D.
    """
    if frames.ndim == 3:
        # frames: (F,H,W), dark_frame: (H,W)
        return subtract_dark_3d(frames, dark_frame)
    elif frames.ndim == 4:
        # frames: (F,H,W,C), dark_frame: (H,W,C)
        return subtract_dark_4d(frames, dark_frame)
    else:
        raise ValueError(f"subtract_dark: frames must be 3D or 4D, got {frames.shape}")
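# --- Editor's usage sketch (not part of the original diff) ---
# subtract_dark broadcasts one master dark over every frame of a stack and
# returns a float32 result of the same shape; the shapes below are illustrative.
def _demo_subtract_dark():
    stack = np.ones((10, 64, 64), dtype=np.float32) * 0.2   # 10 mono frames
    master_dark = np.full((64, 64), 0.05, dtype=np.float32)
    calibrated = subtract_dark(stack, master_dark)
    return calibrated.shape, float(calibrated[0, 0, 0])     # (10, 64, 64), 0.15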


import numpy as np
from numba import njit, prange

# -------------------------------
# Windsorized Sigma Clipping (Weighted, Iterative)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
    """
    Iterative Weighted Windsorized Sigma Clipping for a 3D mono stack.
      stack.shape == (F,H,W)
      weights.shape can be (F,) or (F,H,W).
    Returns a tuple:
      (clipped, rejection_mask)
    where:
      clipped is a 2D image (H,W),
      rejection_mask is a boolean array of shape (F,H,W) with True indicating rejection.
    """
    num_frames, height, width = stack.shape
    clipped = np.zeros((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    # Check weights shape
    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 3 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("windsorized_sigma_clip_weighted_3d_iter: mismatch in shapes for 3D stack & weights")

    for i in prange(height):
        for j in range(width):
            pixel_values = stack[:, i, j]  # shape=(F,)
            if weights.ndim == 1:
                pixel_weights = weights[:]  # shape (F,)
            else:
                pixel_weights = weights[:, i, j]
            # Start with nonzero pixels as valid
            valid_mask = pixel_values != 0
            for _ in range(iterations):
                if np.sum(valid_mask) == 0:
                    break
                valid_vals = pixel_values[valid_mask]
                median_val = np.median(valid_vals)
                std_dev = np.std(valid_vals)
                lower_bound = median_val - lower * std_dev
                upper_bound = median_val + upper * std_dev
                valid_mask = valid_mask & (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
            # Record rejections: a pixel is rejected if not valid.
            for f in range(num_frames):
                rej_mask[f, i, j] = not valid_mask[f]
            valid_vals = pixel_values[valid_mask]
            valid_w = pixel_weights[valid_mask]
            wsum = np.sum(valid_w)
            if wsum > 0:
                clipped[i, j] = np.sum(valid_vals * valid_w) / wsum
            else:
                nonzero = pixel_values[pixel_values != 0]
                if nonzero.size > 0:
                    clipped[i, j] = np.median(nonzero)
                else:
                    clipped[i, j] = 0.0
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
    """
    Iterative Weighted Windsorized Sigma Clipping for a 4D color stack.
      stack.shape == (F,H,W,C)
      weights.shape can be (F,) or (F,H,W,C).
    Returns a tuple:
      (clipped, rejection_mask)
    where:
      clipped is a 3D image (H,W,C),
      rejection_mask is a boolean array of shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.zeros((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    # Check weights shape
    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 4 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("windsorized_sigma_clip_weighted_4d_iter: mismatch in shapes for 4D stack & weights")

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pixel_values = stack[:, i, j, c]  # shape=(F,)
                if weights.ndim == 1:
                    pixel_weights = weights[:]
                else:
                    pixel_weights = weights[:, i, j, c]
                valid_mask = pixel_values != 0
                for _ in range(iterations):
                    if np.sum(valid_mask) == 0:
                        break
                    valid_vals = pixel_values[valid_mask]
                    median_val = np.median(valid_vals)
                    std_dev = np.std(valid_vals)
                    lower_bound = median_val - lower * std_dev
                    upper_bound = median_val + upper * std_dev
                    valid_mask = valid_mask & (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
                for f in range(num_frames):
                    rej_mask[f, i, j, c] = not valid_mask[f]
                valid_vals = pixel_values[valid_mask]
                valid_w = pixel_weights[valid_mask]
                wsum = np.sum(valid_w)
                if wsum > 0:
                    clipped[i, j, c] = np.sum(valid_vals * valid_w) / wsum
                else:
                    nonzero = pixel_values[pixel_values != 0]
                    if nonzero.size > 0:
                        clipped[i, j, c] = np.median(nonzero)
                    else:
                        clipped[i, j, c] = 0.0
    return clipped, rej_mask


def windsorized_sigma_clip_weighted(stack, weights, lower=2.5, upper=2.5, iterations=2):
    """
    Dispatcher that calls the appropriate iterative Numba function.
    Now returns (clipped, rejection_mask).
    """
    if stack.ndim == 3:
        return windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower, upper, iterations)
    elif stack.ndim == 4:
        return windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower, upper, iterations)
    else:
        raise ValueError(f"windsorized_sigma_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
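# --- Editor's usage sketch (not part of the original diff) ---
# The rejection dispatchers all share the same call shape: a frame stack, a
# per-frame (or per-pixel) weight array, and algorithm parameters; each returns
# the stacked image plus a boolean per-frame rejection mask. Values below are
# illustrative only.
def _demo_windsorized_stack():
    rng = np.random.default_rng(2)
    stack = rng.normal(0.5, 0.01, size=(20, 32, 32)).astype(np.float32)
    stack[3, 10, 10] = 5.0                       # simulated cosmic-ray hit
    weights = np.ones(20, dtype=np.float32)      # equal frame weights
    stacked, rejected = windsorized_sigma_clip_weighted(stack, weights,
                                                        lower=2.5, upper=2.5,
                                                        iterations=2)
    return stacked.shape, bool(rejected[3, 10, 10])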


# -------------------------------
# Kappa-Sigma Clipping (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def kappa_sigma_clip_weighted_3d(stack, weights, kappa=2.5, iterations=3):
    """
    Kappa-Sigma Clipping for a 3D mono stack.
      stack.shape == (F,H,W)
    Returns a tuple: (clipped, rejection_mask)
    where rejection_mask is of shape (F,H,W) indicating per-frame rejections.
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            pixel_values = stack[:, i, j].copy()
            if weights.ndim == 1:
                pixel_weights = weights[:]
            else:
                pixel_weights = weights[:, i, j].copy()

            # Use boolean mask instead of tracking indices
            valid_mask = pixel_values != 0

            med = 0.0
            for _ in range(iterations):
                # Count valid pixels
                count = 0
                for k in range(num_frames):
                    if valid_mask[k]:
                        count += 1

                if count == 0:
                    break

                # Extract valid values for stats (this allocation is unavoidable but smaller/temp)
                # In numba this usually lowers to efficient code if we avoid 'np.empty' overhead inside loops
                # but pure mask operations are often faster.
                # However, for median/std we need the compacted array.
                current_vals = pixel_values[valid_mask]

                med = np.median(current_vals)
                std = np.std(current_vals)
                lower_bound = med - kappa * std
                upper_bound = med + kappa * std

                # Update mask: must be valid AND within bounds
                for k in range(num_frames):
                    if valid_mask[k]:
                        val = pixel_values[k]
                        if val < lower_bound or val > upper_bound:
                            valid_mask[k] = False

            # Fill rejection mask
            for f in range(num_frames):
                rej_mask[f, i, j] = not valid_mask[f]

            # Compute weighted mean of final valid pixels
            wsum = 0.0
            vsum = 0.0
            for k in range(num_frames):
                if valid_mask[k]:
                    w = pixel_weights[k]
                    v = pixel_values[k]
                    wsum += w
                    vsum += v * w

            if wsum > 0:
                clipped[i, j] = vsum / wsum
            else:
                clipped[i, j] = med
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def kappa_sigma_clip_weighted_4d(stack, weights, kappa=2.5, iterations=3):
    """
    Kappa-Sigma Clipping for a 4D color stack.
      stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pixel_values = stack[:, i, j, c].copy()
                if weights.ndim == 1:
                    pixel_weights = weights[:]
                else:
                    pixel_weights = weights[:, i, j, c].copy()

                valid_mask = pixel_values != 0

                med = 0.0
                for _ in range(iterations):
                    count = 0
                    for k in range(num_frames):
                        if valid_mask[k]:
                            count += 1

                    if count == 0:
                        break

                    current_vals = pixel_values[valid_mask]

                    med = np.median(current_vals)
                    std = np.std(current_vals)
                    lower_bound = med - kappa * std
                    upper_bound = med + kappa * std

                    for k in range(num_frames):
                        if valid_mask[k]:
                            val = pixel_values[k]
                            if val < lower_bound or val > upper_bound:
                                valid_mask[k] = False

                for f in range(num_frames):
                    rej_mask[f, i, j, c] = not valid_mask[f]

                wsum = 0.0
                vsum = 0.0
                for k in range(num_frames):
                    if valid_mask[k]:
                        w = pixel_weights[k]
                        v = pixel_values[k]
                        wsum += w
                        vsum += v * w

                if wsum > 0:
                    clipped[i, j, c] = vsum / wsum
                else:
                    clipped[i, j, c] = med
    return clipped, rej_mask


def kappa_sigma_clip_weighted(stack, weights, kappa=2.5, iterations=3):
    """
    Dispatcher that returns (clipped, rejection_mask) for kappa-sigma clipping.
    """
    if stack.ndim == 3:
        return kappa_sigma_clip_weighted_3d(stack, weights, kappa, iterations)
    elif stack.ndim == 4:
        return kappa_sigma_clip_weighted_4d(stack, weights, kappa, iterations)
    else:
        raise ValueError(f"kappa_sigma_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
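# --- Editor's usage sketch (not part of the original diff) ---
# Kappa-sigma clipping re-estimates the median/std each iteration and drops
# samples more than kappa standard deviations away; a smaller kappa rejects
# more aggressively. The per-frame weight array below is illustrative only.
def _demo_kappa_sigma():
    rng = np.random.default_rng(3)
    stack = rng.normal(0.5, 0.02, size=(16, 24, 24, 3)).astype(np.float32)
    weights = np.linspace(0.5, 1.0, 16).astype(np.float32)  # e.g. quality weights
    stacked, rejected = kappa_sigma_clip_weighted(stack, weights, kappa=2.0, iterations=3)
    return stacked.shape, rejected.mean()   # (24, 24, 3), fraction rejected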


# -------------------------------
# Trimmed Mean (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def trimmed_mean_weighted_3d(stack, weights, trim_fraction=0.1):
    """
    Trimmed Mean for a 3D mono stack.
      stack.shape == (F,H,W)
    Returns (clipped, rejection_mask) where rejection_mask (F,H,W) flags frames that were trimmed.
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            pix_all = stack[:, i, j]
            if weights.ndim == 1:
                w_all = weights[:]
            else:
                w_all = weights[:, i, j]
            # Exclude zeros and record original indices.
            valid = pix_all != 0
            pix = pix_all[valid]
            w = w_all[valid]
            orig_idx = np.empty(pix_all.shape[0], dtype=np.int64)
            count = 0
            for f in range(num_frames):
                if valid[f]:
                    orig_idx[count] = f
                    count += 1
            n = pix.size
            if n == 0:
                clipped[i, j] = 0.0
                # Mark all as rejected.
                for f in range(num_frames):
                    if not valid[f]:
                        rej_mask[f, i, j] = True
                continue
            trim = int(trim_fraction * n)
            order = np.argsort(pix)
            # Determine which indices (in the valid list) are kept.
            if n > 2 * trim:
                keep_order = order[trim:n - trim]
            else:
                keep_order = order
            # Build a mask for the valid pixels (length n) that are kept.
            keep_mask = np.zeros(n, dtype=np.bool_)
            for k in range(keep_order.size):
                keep_mask[keep_order[k]] = True
            # Map back to original frame indices.
            for idx in range(n):
                frame = orig_idx[idx]
                if not keep_mask[idx]:
                    rej_mask[frame, i, j] = True
                else:
                    rej_mask[frame, i, j] = False
            # Compute weighted average of kept values.
            sorted_pix = pix[order]
            sorted_w = w[order]
            if n > 2 * trim:
                trimmed_values = sorted_pix[trim:n - trim]
                trimmed_weights = sorted_w[trim:n - trim]
            else:
                trimmed_values = sorted_pix
                trimmed_weights = sorted_w
            wsum = trimmed_weights.sum()
            if wsum > 0:
                clipped[i, j] = np.sum(trimmed_values * trimmed_weights) / wsum
            else:
                clipped[i, j] = np.median(trimmed_values)
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def trimmed_mean_weighted_4d(stack, weights, trim_fraction=0.1):
    """
    Trimmed Mean for a 4D color stack.
      stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pix_all = stack[:, i, j, c]
                if weights.ndim == 1:
                    w_all = weights[:]
                else:
                    w_all = weights[:, i, j, c]
                valid = pix_all != 0
                pix = pix_all[valid]
                w = w_all[valid]
                orig_idx = np.empty(pix_all.shape[0], dtype=np.int64)
                count = 0
                for f in range(num_frames):
                    if valid[f]:
                        orig_idx[count] = f
                        count += 1
                n = pix.size
                if n == 0:
                    clipped[i, j, c] = 0.0
                    for f in range(num_frames):
                        if not valid[f]:
                            rej_mask[f, i, j, c] = True
                    continue
                trim = int(trim_fraction * n)
                order = np.argsort(pix)
                if n > 2 * trim:
                    keep_order = order[trim:n - trim]
                else:
                    keep_order = order
                keep_mask = np.zeros(n, dtype=np.bool_)
                for k in range(keep_order.size):
                    keep_mask[keep_order[k]] = True
                for idx in range(n):
                    frame = orig_idx[idx]
                    if not keep_mask[idx]:
                        rej_mask[frame, i, j, c] = True
                    else:
                        rej_mask[frame, i, j, c] = False
                sorted_pix = pix[order]
                sorted_w = w[order]
                if n > 2 * trim:
                    trimmed_values = sorted_pix[trim:n - trim]
                    trimmed_weights = sorted_w[trim:n - trim]
                else:
                    trimmed_values = sorted_pix
                    trimmed_weights = sorted_w
                wsum = trimmed_weights.sum()
                if wsum > 0:
                    clipped[i, j, c] = np.sum(trimmed_values * trimmed_weights) / wsum
                else:
                    clipped[i, j, c] = np.median(trimmed_values)
    return clipped, rej_mask


def trimmed_mean_weighted(stack, weights, trim_fraction=0.1):
    """
    Dispatcher that returns (clipped, rejection_mask) for trimmed mean.
    """
    if stack.ndim == 3:
        return trimmed_mean_weighted_3d(stack, weights, trim_fraction)
    elif stack.ndim == 4:
        return trimmed_mean_weighted_4d(stack, weights, trim_fraction)
    else:
        raise ValueError(f"trimmed_mean_weighted: stack must be 3D or 4D, got {stack.shape}")
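# --- Editor's usage sketch (not part of the original diff) ---
# With trim_fraction=0.1 and 20 frames, the lowest 2 and highest 2 samples at
# each pixel are discarded before the weighted mean; the rejection mask records
# exactly which frames were trimmed per pixel. Values below are illustrative.
def _demo_trimmed_mean():
    rng = np.random.default_rng(4)
    stack = rng.normal(0.4, 0.03, size=(20, 16, 16)).astype(np.float32)
    weights = np.ones(20, dtype=np.float32)
    stacked, rejected = trimmed_mean_weighted(stack, weights, trim_fraction=0.1)
    return stacked.shape, int(rejected[:, 8, 8].sum())   # 4 frames trimmed here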


# -------------------------------
# Extreme Studentized Deviate (ESD) Clipping (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def esd_clip_weighted_3d(stack, weights, threshold=3.0):
    """
    ESD Clipping for a 3D mono stack.
      stack.shape == (F,H,W)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W).
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 3 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("esd_clip_weighted_3d: mismatch in shapes for 3D stack & weights")

    for i in prange(height):
        for j in range(width):
            pix = stack[:, i, j]
            if weights.ndim == 1:
                w = weights[:]
            else:
                w = weights[:, i, j]
            valid = pix != 0
            values = pix[valid]
            wvals = w[valid]
            if values.size == 0:
                clipped[i, j] = 0.0
                for f in range(num_frames):
                    if not valid[f]:
                        rej_mask[f, i, j] = True
                continue
            mean_val = np.mean(values)
            std_val = np.std(values)
            if std_val == 0:
                clipped[i, j] = mean_val
                for f in range(num_frames):
                    rej_mask[f, i, j] = False
                continue
            z_scores = np.abs((values - mean_val) / std_val)
            valid2 = z_scores < threshold
            # Mark rejected: for the valid entries, use valid2.
            idx = 0
            for f in range(num_frames):
                if valid[f]:
                    if not valid2[idx]:
                        rej_mask[f, i, j] = True
                    else:
                        rej_mask[f, i, j] = False
                    idx += 1
                else:
                    rej_mask[f, i, j] = True
            values = values[valid2]
            wvals = wvals[valid2]
            wsum = wvals.sum()
            if wsum > 0:
                clipped[i, j] = np.sum(values * wvals) / wsum
            else:
                clipped[i, j] = mean_val
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def esd_clip_weighted_4d(stack, weights, threshold=3.0):
    """
    ESD Clipping for a 4D color stack.
      stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 4 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("esd_clip_weighted_4d: mismatch in shapes for 4D stack & weights")

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pix = stack[:, i, j, c]
                if weights.ndim == 1:
                    w = weights[:]
                else:
                    w = weights[:, i, j, c]
                valid = pix != 0
                values = pix[valid]
                wvals = w[valid]
                if values.size == 0:
                    clipped[i, j, c] = 0.0
                    for f in range(num_frames):
                        if not valid[f]:
                            rej_mask[f, i, j, c] = True
                    continue
                mean_val = np.mean(values)
                std_val = np.std(values)
                if std_val == 0:
                    clipped[i, j, c] = mean_val
                    for f in range(num_frames):
                        rej_mask[f, i, j, c] = False
                    continue
                z_scores = np.abs((values - mean_val) / std_val)
                valid2 = z_scores < threshold
                idx = 0
                for f in range(num_frames):
                    if valid[f]:
                        if not valid2[idx]:
                            rej_mask[f, i, j, c] = True
                        else:
                            rej_mask[f, i, j, c] = False
                        idx += 1
                    else:
                        rej_mask[f, i, j, c] = True
                values = values[valid2]
                wvals = wvals[valid2]
                wsum = wvals.sum()
                if wsum > 0:
                    clipped[i, j, c] = np.sum(values * wvals) / wsum
                else:
                    clipped[i, j, c] = mean_val
    return clipped, rej_mask


def esd_clip_weighted(stack, weights, threshold=3.0):
    """
    Dispatcher that returns (clipped, rejection_mask) for ESD clipping.
    """
    if stack.ndim == 3:
        return esd_clip_weighted_3d(stack, weights, threshold)
    elif stack.ndim == 4:
        return esd_clip_weighted_4d(stack, weights, threshold)
    else:
        raise ValueError(f"esd_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
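# --- Editor's usage sketch (not part of the original diff) ---
# ESD clipping here is a single-pass z-score cut: samples more than `threshold`
# standard deviations from the per-pixel mean are excluded from the weighted
# average. It is cheaper than the iterative clippers above but less robust,
# since a strong outlier inflates the standard deviation it is judged against.
def _demo_esd_clip():
    rng = np.random.default_rng(5)
    stack = rng.normal(0.3, 0.02, size=(12, 16, 16)).astype(np.float32)
    weights = np.ones(12, dtype=np.float32)
    stacked, rejected = esd_clip_weighted(stack, weights, threshold=3.0)
    return stacked.shape, rejected.any()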
|
|
1012
|
+
|
|
1013
|
+
|
|
1014
|
+
# -------------------------------
# Biweight Location (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def biweight_location_weighted_3d(stack, weights, tuning_constant=6.0):
    """
    Biweight Location for a 3D mono stack.
    stack.shape == (F,H,W)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W).
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 3 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("biweight_location_weighted_3d: mismatch in shapes for 3D stack & weights")

    for i in prange(height):
        for j in range(width):
            x = stack[:, i, j]
            if weights.ndim == 1:
                w = weights[:]
            else:
                w = weights[:, i, j]
            valid = x != 0
            x_valid = x[valid]
            w_valid = w[valid]
            # Record rejections for zeros:
            for f in range(num_frames):
                if not valid[f]:
                    rej_mask[f, i, j] = True
                else:
                    rej_mask[f, i, j] = False  # initialize as accepted; may update below
            n = x_valid.size
            if n == 0:
                clipped[i, j] = 0.0
                continue
            M = np.median(x_valid)
            mad = np.median(np.abs(x_valid - M))
            if mad == 0:
                clipped[i, j] = M
                continue
            u = (x_valid - M) / (tuning_constant * mad)
            mask = np.abs(u) < 1
            # Mark frames that were excluded by the biweight rejection:
            idx = 0
            for f in range(num_frames):
                if valid[f]:
                    if not mask[idx]:
                        rej_mask[f, i, j] = True
                    idx += 1
            x_masked = x_valid[mask]
            w_masked = w_valid[mask]
            numerator = ((x_masked - M) * (1 - u[mask]**2)**2 * w_masked).sum()
            denominator = ((1 - u[mask]**2)**2 * w_masked).sum()
            if denominator != 0:
                biweight = M + numerator / denominator
            else:
                biweight = M
            clipped[i, j] = biweight
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def biweight_location_weighted_4d(stack, weights, tuning_constant=6.0):
    """
    Biweight Location for a 4D color stack.
    stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 4 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("biweight_location_weighted_4d: mismatch in shapes for 4D stack & weights")

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                x = stack[:, i, j, c]
                if weights.ndim == 1:
                    w = weights[:]
                else:
                    w = weights[:, i, j, c]
                valid = x != 0
                x_valid = x[valid]
                w_valid = w[valid]
                for f in range(num_frames):
                    if not valid[f]:
                        rej_mask[f, i, j, c] = True
                    else:
                        rej_mask[f, i, j, c] = False
                n = x_valid.size
                if n == 0:
                    clipped[i, j, c] = 0.0
                    continue
                M = np.median(x_valid)
                mad = np.median(np.abs(x_valid - M))
                if mad == 0:
                    clipped[i, j, c] = M
                    continue
                u = (x_valid - M) / (tuning_constant * mad)
                mask = np.abs(u) < 1
                idx = 0
                for f in range(num_frames):
                    if valid[f]:
                        if not mask[idx]:
                            rej_mask[f, i, j, c] = True
                        idx += 1
                x_masked = x_valid[mask]
                w_masked = w_valid[mask]
                numerator = ((x_masked - M) * (1 - u[mask]**2)**2 * w_masked).sum()
                denominator = ((1 - u[mask]**2)**2 * w_masked).sum()
                if denominator != 0:
                    biweight = M + numerator / denominator
                else:
                    biweight = M
                clipped[i, j, c] = biweight
    return clipped, rej_mask


def biweight_location_weighted(stack, weights, tuning_constant=6.0):
    """
    Dispatcher that returns (clipped, rejection_mask) for biweight location.
    """
    if stack.ndim == 3:
        return biweight_location_weighted_3d(stack, weights, tuning_constant)
    elif stack.ndim == 4:
        return biweight_location_weighted_4d(stack, weights, tuning_constant)
    else:
        raise ValueError(f"biweight_location_weighted: stack must be 3D or 4D, got {stack.shape}")

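# Usage sketch (hypothetical helper, not part of the package API). The biweight
# location downweights each sample by (1 - u^2)^2 with u = (x - median) / (c * MAD),
# and drops samples with |u| >= 1. Shapes and values below are invented.
def _demo_biweight_location_weighted():
    stack = np.random.rand(5, 8, 8).astype(np.float32)      # (F, H, W) mono stack
    weights = np.ones(5, dtype=np.float32)                   # one scalar weight per frame
    result, rejected = biweight_location_weighted(stack, weights, tuning_constant=6.0)
    return result, rejected
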
# -------------------------------
# Modified Z-Score Clipping (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def modified_zscore_clip_weighted_3d(stack, weights, threshold=3.5):
    """
    Modified Z-Score Clipping for a 3D mono stack.
    stack.shape == (F,H,W)
    Returns (clipped, rejection_mask) with rejection_mask shape (F,H,W).
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 3 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("modified_zscore_clip_weighted_3d: mismatch in shapes for 3D stack & weights")

    for i in prange(height):
        for j in range(width):
            x = stack[:, i, j]
            if weights.ndim == 1:
                w = weights[:]
            else:
                w = weights[:, i, j]
            valid = x != 0
            x_valid = x[valid]
            w_valid = w[valid]
            if x_valid.size == 0:
                clipped[i, j] = 0.0
                for f in range(num_frames):
                    if not valid[f]:
                        rej_mask[f, i, j] = True
                continue
            median_val = np.median(x_valid)
            mad = np.median(np.abs(x_valid - median_val))
            if mad == 0:
                clipped[i, j] = median_val
                for f in range(num_frames):
                    rej_mask[f, i, j] = False
                continue
            modified_z = 0.6745 * (x_valid - median_val) / mad
            valid2 = np.abs(modified_z) < threshold
            idx = 0
            for f in range(num_frames):
                if valid[f]:
                    if not valid2[idx]:
                        rej_mask[f, i, j] = True
                    else:
                        rej_mask[f, i, j] = False
                    idx += 1
                else:
                    rej_mask[f, i, j] = True
            x_final = x_valid[valid2]
            w_final = w_valid[valid2]
            wsum = w_final.sum()
            if wsum > 0:
                clipped[i, j] = np.sum(x_final * w_final) / wsum
            else:
                clipped[i, j] = median_val
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def modified_zscore_clip_weighted_4d(stack, weights, threshold=3.5):
    """
    Modified Z-Score Clipping for a 4D color stack.
    stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) with rejection_mask shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 4 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("modified_zscore_clip_weighted_4d: mismatch in shapes for 4D stack & weights")

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                x = stack[:, i, j, c]
                if weights.ndim == 1:
                    w = weights[:]
                else:
                    w = weights[:, i, j, c]
                valid = x != 0
                x_valid = x[valid]
                w_valid = w[valid]
                if x_valid.size == 0:
                    clipped[i, j, c] = 0.0
                    for f in range(num_frames):
                        if not valid[f]:
                            rej_mask[f, i, j, c] = True
                    continue
                median_val = np.median(x_valid)
                mad = np.median(np.abs(x_valid - median_val))
                if mad == 0:
                    clipped[i, j, c] = median_val
                    for f in range(num_frames):
                        rej_mask[f, i, j, c] = False
                    continue
                modified_z = 0.6745 * (x_valid - median_val) / mad
                valid2 = np.abs(modified_z) < threshold
                idx = 0
                for f in range(num_frames):
                    if valid[f]:
                        if not valid2[idx]:
                            rej_mask[f, i, j, c] = True
                        else:
                            rej_mask[f, i, j, c] = False
                        idx += 1
                    else:
                        rej_mask[f, i, j, c] = True
                x_final = x_valid[valid2]
                w_final = w_valid[valid2]
                wsum = w_final.sum()
                if wsum > 0:
                    clipped[i, j, c] = np.sum(x_final * w_final) / wsum
                else:
                    clipped[i, j, c] = median_val
    return clipped, rej_mask


def modified_zscore_clip_weighted(stack, weights, threshold=3.5):
    """
    Dispatcher that returns (clipped, rejection_mask) for modified z-score clipping.
    """
    if stack.ndim == 3:
        return modified_zscore_clip_weighted_3d(stack, weights, threshold)
    elif stack.ndim == 4:
        return modified_zscore_clip_weighted_4d(stack, weights, threshold)
    else:
        raise ValueError(f"modified_zscore_clip_weighted: stack must be 3D or 4D, got {stack.shape}")

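# Usage sketch (hypothetical helper, not part of the package API). The statistic above
# is the modified z-score 0.6745 * (x - median) / MAD, so threshold=3.5 rejects samples
# roughly 3.5 robust sigmas from the per-pixel median. Shapes below are invented.
def _demo_modified_zscore_clip():
    stack = np.random.rand(6, 10, 10, 3).astype(np.float32)  # (F, H, W, C) color stack
    weights = np.ones(6, dtype=np.float32)
    result, rejected = modified_zscore_clip_weighted(stack, weights, threshold=3.5)
    return result, rejected
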
# -------------------------------
# Windsorized Sigma Clipping (Non-weighted)
# -------------------------------

@njit(parallel=True, fastmath=True, cache=True)
def windsorized_sigma_clip_3d(stack, lower=2.5, upper=2.5):
    """
    Windsorized Sigma Clipping for a 3D mono stack (non-weighted).
    stack.shape == (F,H,W)
    Returns (clipped, rejection_mask) where rejection_mask is (F,H,W).
    """
    num_frames, height, width = stack.shape
    clipped = np.zeros((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            pixel_values = stack[:, i, j]
            median_val = np.median(pixel_values)
            std_dev = np.std(pixel_values)
            lower_bound = median_val - lower * std_dev
            upper_bound = median_val + upper * std_dev
            valid = (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
            for f in range(num_frames):
                rej_mask[f, i, j] = not valid[f]
            valid_vals = pixel_values[valid]
            if valid_vals.size > 0:
                clipped[i, j] = np.mean(valid_vals)
            else:
                clipped[i, j] = median_val
    return clipped, rej_mask


@njit(parallel=True, fastmath=True, cache=True)
def windsorized_sigma_clip_4d(stack, lower=2.5, upper=2.5):
    """
    Windsorized Sigma Clipping for a 4D color stack (non-weighted).
    stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask is (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.zeros((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pixel_values = stack[:, i, j, c]
                median_val = np.median(pixel_values)
                std_dev = np.std(pixel_values)
                lower_bound = median_val - lower * std_dev
                upper_bound = median_val + upper * std_dev
                valid = (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
                for f in range(num_frames):
                    rej_mask[f, i, j, c] = not valid[f]
                valid_vals = pixel_values[valid]
                if valid_vals.size > 0:
                    clipped[i, j, c] = np.mean(valid_vals)
                else:
                    clipped[i, j, c] = median_val
    return clipped, rej_mask


def windsorized_sigma_clip(stack, lower=2.5, upper=2.5):
    """
    Dispatcher function that calls either the 3D or 4D specialized Numba function,
    depending on 'stack.ndim'.
    """
    if stack.ndim == 3:
        return windsorized_sigma_clip_3d(stack, lower, upper)
    elif stack.ndim == 4:
        return windsorized_sigma_clip_4d(stack, lower, upper)
    else:
        raise ValueError(f"windsorized_sigma_clip: stack must be 3D or 4D, got {stack.shape}")

def max_value_stack(stack, weights=None):
    """
    Stacking by taking the maximum value along the frame axis.
    Returns (clipped, rejection_mask) for compatibility:
      - clipped: H×W (or H×W×C)
      - rejection_mask: same shape as stack, all False
    """
    clipped = np.max(stack, axis=0)
    rej_mask = np.zeros(stack.shape, dtype=bool)
    return clipped, rej_mask

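# Usage sketch (hypothetical helper, not part of the package API). Both helpers above
# keep the (clipped, rejection_mask) return convention used by the weighted rejectors,
# so they can be swapped in interchangeably. The stack below is invented.
def _demo_non_weighted_stacking():
    stack = np.random.rand(4, 12, 12).astype(np.float32)     # (F, H, W)
    mean_img, rej = windsorized_sigma_clip(stack, lower=2.5, upper=2.5)
    max_img, _ = max_value_stack(stack)                       # e.g. for satellite trails
    return mean_img, max_img, rej
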
@njit(parallel=True, cache=True)
def subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal):
    """
    For mono stack:
      frames.shape == (F,H,W)
      dark_frame.shape == (H,W)
    Adds 'pedestal' after subtracting dark_frame from each frame.
    Returns the same shape (F,H,W).
    """
    num_frames, height, width = frames.shape
    result = np.empty_like(frames, dtype=np.float32)

    # Validate dark_frame shape
    if dark_frame.ndim != 2 or dark_frame.shape != (height, width):
        raise ValueError(
            "subtract_dark_with_pedestal_3d: for 3D frames, dark_frame must be 2D (H,W)"
        )

    for i in prange(num_frames):
        for y in range(height):
            for x in range(width):
                result[i, y, x] = frames[i, y, x] - dark_frame[y, x] + pedestal

    return result

@njit(parallel=True, cache=True)
def subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal):
    """
    For color stack:
      frames.shape == (F,H,W,C)
      dark_frame.shape == (H,W,C)
    Adds 'pedestal' after subtracting dark_frame from each frame.
    Returns the same shape (F,H,W,C).
    """
    num_frames, height, width, channels = frames.shape
    result = np.empty_like(frames, dtype=np.float32)

    # Validate dark_frame shape
    if dark_frame.ndim != 3 or dark_frame.shape != (height, width, channels):
        raise ValueError(
            "subtract_dark_with_pedestal_4d: for 4D frames, dark_frame must be 3D (H,W,C)"
        )

    for i in prange(num_frames):
        for y in range(height):
            for x in range(width):
                for c in range(channels):
                    result[i, y, x, c] = frames[i, y, x, c] - dark_frame[y, x, c] + pedestal

    return result

def subtract_dark_with_pedestal(frames, dark_frame, pedestal):
    """
    Dispatcher function that calls either the 3D or 4D specialized Numba function
    depending on 'frames.ndim'.
    """
    if frames.ndim == 3:
        return subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal)
    elif frames.ndim == 4:
        return subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal)
    else:
        raise ValueError(
            f"subtract_dark_with_pedestal: frames must be 3D or 4D, got {frames.shape}"
        )

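# Usage sketch (hypothetical helper, not part of the package API). The pedestal keeps
# calibrated frames from going negative after dark subtraction. All shapes and values
# below are invented for illustration.
def _demo_subtract_dark_with_pedestal():
    lights = np.random.rand(3, 32, 32).astype(np.float32)     # (F, H, W) light frames
    master_dark = np.full((32, 32), 0.01, dtype=np.float32)   # (H, W) master dark
    calibrated = subtract_dark_with_pedestal(lights, master_dark, pedestal=0.001)
    return calibrated
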
@njit(parallel=True, fastmath=True, cache=True)
def parallel_measure_frames(images):
    """
    Parallel processing for measuring simple stats (mean only).
    'images' is a list (or array) of N images, each of which can be:
      - 2D (H,W) for a single mono image
      - 3D (H,W,C) for a single color image
      - Possibly 3D or 4D if you're storing multi-frame stacks in 'images'
    We just compute np.mean(...) of each image, no matter how many dims.
    """
    n = len(images)
    means = np.zeros(n, dtype=np.float32)

    for i in prange(n):
        arr = images[i]
        # arr could have shape (H,W) or (H,W,C) or (F,H,W) etc.
        # np.mean works for any dimension, so no special logic needed.
        means[i] = np.float32(np.mean(arr))

    return means


@njit(fastmath=True, cache=True)
def fast_mad(image):
    """ Computes the Median Absolute Deviation (MAD) as a robust noise estimator. """
    flat_image = image.ravel()  # ✅ Flatten the 2D array into 1D
    median_val = np.median(flat_image)  # Compute median
    mad = np.median(np.abs(flat_image - median_val))  # Compute MAD
    return mad * 1.4826  # ✅ Scale MAD to match standard deviation (for Gaussian noise)


@njit(fastmath=True, cache=True)
def compute_snr(image):
    """ Computes the Signal-to-Noise Ratio (SNR) using fast Numba std. """
    mean_signal = np.mean(image)
    noise = compute_noise(image)
    return mean_signal / noise if noise > 0 else 0


@njit(fastmath=True, cache=True)
def compute_noise(image):
    """ Estimates noise using Median Absolute Deviation (MAD). """
    return fast_mad(image)


def compute_star_count(image):
    """ Uses fast star detection instead of DAOStarFinder. """
    return fast_star_count(image)


def fast_star_count(
    image,
    blur_size=15,          # Smaller blur preserves faint/small stars
    threshold_factor=0.8,
    min_area=2,
    max_area=5000
):
    """
    Estimate star count + average eccentricity by:
      1) Convert to 8-bit grayscale
      2) Blur => subtract => enhance stars
      3) Otsu's threshold * threshold_factor => final threshold
      4) Contour detection + ellipse fit => eccentricity
    Returns (star_count, avg_ecc).
    """

    # 1) Convert to grayscale if needed
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # 2) Normalize to 8-bit
    img_min, img_max = image.min(), image.max()
    if img_max > img_min:
        image_8u = (255.0 * (image - img_min) / (img_max - img_min)).astype(np.uint8)
    else:
        return 0, 0.0  # All pixels identical => no stars

    # 3) Blur + subtract => enhance
    blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
    subtracted = cv2.absdiff(image_8u, blurred)

    # 4) Otsu's threshold on 'subtracted'
    otsu_thresh_val, _ = cv2.threshold(subtracted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Scale it down if we want to detect more/fainter stars
    final_thresh_val = int(otsu_thresh_val * threshold_factor)
    if final_thresh_val < 2:
        final_thresh_val = 2  # avoid going below 2

    # 5) Apply threshold
    _, thresh = cv2.threshold(subtracted, final_thresh_val, 255, cv2.THRESH_BINARY)

    # 6) (Optional) Morphological opening to remove single-pixel noise
    # Adjust kernel size if you get too many/few stars
    kernel = np.ones((2, 2), np.uint8)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

    # 7) Find contours
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # 8) Filter contours by area, fit ellipse => compute eccentricity
    star_count = 0
    ecc_values = []
    for c in contours:
        area = cv2.contourArea(c)
        if area < min_area or area > max_area:
            continue

        if len(c) < 5:
            continue  # Need >=5 points to fit an ellipse

        # Fit ellipse
        ellipse = cv2.fitEllipse(c)
        (cx, cy), (major_axis, minor_axis), angle = ellipse

        # major_axis >= minor_axis
        if minor_axis > major_axis:
            major_axis, minor_axis = minor_axis, major_axis

        if major_axis > 0:
            ecc = math.sqrt(1.0 - (minor_axis**2 / major_axis**2))
        else:
            ecc = 0.0

        ecc_values.append(ecc)
        star_count += 1

    if star_count > 0:
        avg_ecc = float(np.mean(ecc_values))
    else:
        avg_ecc = 0.0

    return star_count, avg_ecc

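# Usage sketch (hypothetical helper, not part of the package API). fast_mad scales the
# MAD by 1.4826 so the return value approximates a Gaussian sigma, and compute_snr is
# mean / that estimate. The synthetic frame below is invented; on real data these
# metrics are typically used to weight or cull frames before stacking.
def _demo_frame_quality_metrics():
    frame = np.random.normal(0.1, 0.01, (64, 64)).astype(np.float32)
    noise_sigma = compute_noise(frame)           # roughly 0.01 for this synthetic frame
    snr = compute_snr(frame)                     # roughly 10 (mean 0.1 / sigma 0.01)
    stars, avg_ecc = compute_star_count(frame)   # OpenCV-based star/eccentricity estimate
    return noise_sigma, snr, stars, avg_ecc
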
@njit(parallel=True, fastmath=True, cache=True)
def normalize_images_3d(stack, ref_median):
    """
    Normalizes each frame in a 3D mono stack (F,H,W)
    so that its median equals ref_median.

    Returns a 3D result (F,H,W).
    """
    num_frames, height, width = stack.shape
    normalized_stack = np.zeros_like(stack, dtype=np.float32)

    for i in prange(num_frames):
        # shape of one frame: (H,W)
        img = stack[i]
        img_median = np.median(img)

        # Prevent division by zero
        scale_factor = ref_median / max(img_median, 1e-6)
        # Scale the entire 2D frame
        normalized_stack[i] = img * scale_factor

    return normalized_stack

@njit(parallel=True, fastmath=True, cache=True)
def normalize_images_4d(stack, ref_median):
    """
    Normalizes each frame in a 4D color stack (F,H,W,C)
    so that its median equals ref_median.

    Returns a 4D result (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    normalized_stack = np.zeros_like(stack, dtype=np.float32)

    for i in prange(num_frames):
        # shape of one frame: (H,W,C)
        img = stack[i]  # (H,W,C)
        # Flatten to 1D to compute median across all channels/pixels
        img_median = np.median(img.ravel())

        # Prevent division by zero
        scale_factor = ref_median / max(img_median, 1e-6)

        # Scale the entire 3D frame
        for y in range(height):
            for x in range(width):
                for c in range(channels):
                    normalized_stack[i, y, x, c] = img[y, x, c] * scale_factor

    return normalized_stack

def normalize_images(stack, ref_median):
    """
    Dispatcher that calls either the 3D or 4D specialized Numba function
    depending on 'stack.ndim'.

      - If stack.ndim == 3, we assume shape (F,H,W).
      - If stack.ndim == 4, we assume shape (F,H,W,C).
    """
    if stack.ndim == 3:
        return normalize_images_3d(stack, ref_median)
    elif stack.ndim == 4:
        return normalize_images_4d(stack, ref_median)
    else:
        raise ValueError(f"normalize_images: stack must be 3D or 4D, got shape {stack.shape}")

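# Usage sketch (hypothetical helper, not part of the package API). Frames are scaled
# multiplicatively so every frame's median matches the reference median before stacking;
# choosing frame 0 as the reference below is just one possible convention.
def _demo_normalize_images():
    stack = np.random.rand(5, 16, 16).astype(np.float32)      # (F, H, W)
    ref_median = float(np.median(stack[0]))                    # reference = frame 0 median
    return normalize_images(stack, ref_median)
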
@njit(parallel=True, fastmath=True, cache=True)
def _edge_aware_interpolate_numba(out):
    """
    For each pixel in out (shape: (H,W,3)) where out[y,x,c] == 0,
    use a simple edge-aware approach:
      1) Compute horizontal gradient = abs( left - right )
      2) Compute vertical gradient = abs( top - bottom )
      3) Choose the direction with the smaller gradient => average neighbors
      4) If neighbors are missing or zero, fallback to a small 3x3 average

    This is simpler than AHD but usually better than naive bilinear
    for high-contrast features like star cores.
    """
    H, W, C = out.shape

    for c in range(C):
        for y in prange(H):
            for x in range(W):
                if out[y, x, c] == 0:
                    # Gather immediate neighbors
                    left = 0.0
                    right = 0.0
                    top = 0.0
                    bottom = 0.0
                    have_left = False
                    have_right = False
                    have_top = False
                    have_bottom = False

                    # Left
                    if x - 1 >= 0:
                        val = out[y, x - 1, c]
                        if val != 0:
                            left = val
                            have_left = True

                    # Right
                    if x + 1 < W:
                        val = out[y, x + 1, c]
                        if val != 0:
                            right = val
                            have_right = True

                    # Top
                    if y - 1 >= 0:
                        val = out[y - 1, x, c]
                        if val != 0:
                            top = val
                            have_top = True

                    # Bottom
                    if y + 1 < H:
                        val = out[y + 1, x, c]
                        if val != 0:
                            bottom = val
                            have_bottom = True

                    # Compute gradients
                    # If we don't have valid neighbors for that direction,
                    # set the gradient to a large number => won't be chosen
                    gh = 1e6
                    gv = 1e6

                    if have_left and have_right:
                        gh = abs(left - right)
                    if have_top and have_bottom:
                        gv = abs(top - bottom)

                    # Decide which direction to interpolate
                    if gh < gv and have_left and have_right:
                        # Horizontal interpolation
                        out[y, x, c] = 0.5 * (left + right)
                    elif gv <= gh and have_top and have_bottom:
                        # Vertical interpolation
                        out[y, x, c] = 0.5 * (top + bottom)
                    else:
                        # Fallback: average 3×3 region
                        sumv = 0.0
                        count = 0
                        for dy in range(-1, 2):
                            for dx in range(-1, 2):
                                yy = y + dy
                                xx = x + dx
                                if 0 <= yy < H and 0 <= xx < W:
                                    val = out[yy, xx, c]
                                    if val != 0:
                                        sumv += val
                                        count += 1
                        if count > 0:
                            out[y, x, c] = sumv / count

    return out
# === Separate Full-Resolution Demosaicing Kernels ===
# These njit functions assume the raw image is arranged in a Bayer pattern
# and that we want a full (H,W,3) output.

@njit(parallel=True, fastmath=True, cache=True)
def debayer_RGGB_fullres_fast(image):
    """
    For an RGGB pattern:
      - Even rows: even cols = Red, odd cols = Green.
      - Odd rows: even cols = Green, odd cols = Blue.
    """
    H, W = image.shape
    out = np.zeros((H, W, 3), dtype=image.dtype)
    for y in prange(H):
        for x in range(W):
            if (y & 1) == 0:
                if (x & 1) == 0:
                    # Even row, even col: Red
                    out[y, x, 0] = image[y, x]
                else:
                    # Even row, odd col: Green
                    out[y, x, 1] = image[y, x]
            else:
                if (x & 1) == 0:
                    # Odd row, even col: Green
                    out[y, x, 1] = image[y, x]
                else:
                    # Odd row, odd col: Blue
                    out[y, x, 2] = image[y, x]
    _edge_aware_interpolate_numba(out)
    return out

@njit(parallel=True, fastmath=True, cache=True)
def debayer_BGGR_fullres_fast(image):
    """
    For a BGGR pattern:
      - Even rows: even cols = Blue, odd cols = Green.
      - Odd rows: even cols = Green, odd cols = Red.
    """
    H, W = image.shape
    out = np.zeros((H, W, 3), dtype=image.dtype)
    for y in prange(H):
        for x in range(W):
            if (y & 1) == 0:
                if (x & 1) == 0:
                    # Even row, even col: Blue
                    out[y, x, 2] = image[y, x]
                else:
                    # Even row, odd col: Green
                    out[y, x, 1] = image[y, x]
            else:
                if (x & 1) == 0:
                    # Odd row, even col: Green
                    out[y, x, 1] = image[y, x]
                else:
                    # Odd row, odd col: Red
                    out[y, x, 0] = image[y, x]
    _edge_aware_interpolate_numba(out)
    return out

@njit(parallel=True, fastmath=True, cache=True)
def debayer_GRBG_fullres_fast(image):
    """
    For a GRBG pattern:
      - Even rows: even cols = Green, odd cols = Red.
      - Odd rows: even cols = Blue, odd cols = Green.
    """
    H, W = image.shape
    out = np.zeros((H, W, 3), dtype=image.dtype)
    for y in prange(H):
        for x in range(W):
            if (y & 1) == 0:
                if (x & 1) == 0:
                    # Even row, even col: Green
                    out[y, x, 1] = image[y, x]
                else:
                    # Even row, odd col: Red
                    out[y, x, 0] = image[y, x]
            else:
                if (x & 1) == 0:
                    # Odd row, even col: Blue
                    out[y, x, 2] = image[y, x]
                else:
                    # Odd row, odd col: Green
                    out[y, x, 1] = image[y, x]
    _edge_aware_interpolate_numba(out)
    return out

@njit(parallel=True, fastmath=True, cache=True)
def debayer_GBRG_fullres_fast(image):
    """
    For a GBRG pattern:
      - Even rows: even cols = Green, odd cols = Blue.
      - Odd rows: even cols = Red, odd cols = Green.
    """
    H, W = image.shape
    out = np.zeros((H, W, 3), dtype=image.dtype)
    for y in prange(H):
        for x in range(W):
            if (y & 1) == 0:
                if (x & 1) == 0:
                    # Even row, even col: Green
                    out[y, x, 1] = image[y, x]
                else:
                    # Even row, odd col: Blue
                    out[y, x, 2] = image[y, x]
            else:
                if (x & 1) == 0:
                    # Odd row, even col: Red
                    out[y, x, 0] = image[y, x]
                else:
                    # Odd row, odd col: Green
                    out[y, x, 1] = image[y, x]
    _edge_aware_interpolate_numba(out)
    return out

# === Python-Level Dispatch Function ===
# Since Numba cannot easily compare strings in nopython mode,
# we do the if/elif check here in Python and then call the appropriate njit function.

def debayer_fits_fast(image_data, bayer_pattern):
    bp = bayer_pattern.upper()
    if bp == 'RGGB':
        return debayer_RGGB_fullres_fast(image_data)
    elif bp == 'BGGR':
        return debayer_BGGR_fullres_fast(image_data)
    elif bp == 'GRBG':
        return debayer_GRBG_fullres_fast(image_data)
    elif bp == 'GBRG':
        return debayer_GBRG_fullres_fast(image_data)
    else:
        raise ValueError(f"Unsupported Bayer pattern: {bayer_pattern}")

def debayer_raw_fast(raw_image_data, bayer_pattern="RGGB"):
    # For RAW images, use the same full-resolution demosaicing logic.
    return debayer_fits_fast(raw_image_data, bayer_pattern)

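# Usage sketch (hypothetical helper, not part of the package API). The dispatcher keys
# on the Bayer pattern string, which typically comes from the camera or FITS header; the
# synthetic mosaic below stands in for a single-channel CFA frame.
def _demo_debayer():
    mosaic = np.random.rand(64, 64).astype(np.float32)        # raw CFA data, (H, W)
    rgb = debayer_fits_fast(mosaic, "RGGB")                    # -> (64, 64, 3)
    return rgb
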
@njit(parallel=True, fastmath=True, cache=True)
def applyPixelMath_numba(image_array, amount):
    factor = 3 ** amount
    denom_factor = 3 ** amount - 1
    height, width, channels = image_array.shape
    output = np.empty_like(image_array, dtype=np.float32)

    for y in prange(height):
        for x in prange(width):
            for c in prange(channels):
                val = (factor * image_array[y, x, c]) / (denom_factor * image_array[y, x, c] + 1)
                output[y, x, c] = min(max(val, 0.0), 1.0)  # Equivalent to np.clip()

    return output

@njit(parallel=True, fastmath=True, cache=True)
def adjust_saturation_numba(image_array, saturation_factor):
    height, width, channels = image_array.shape
    output = np.empty_like(image_array, dtype=np.float32)

    for y in prange(int(height)):  # Ensure y is an integer
        for x in prange(int(width)):  # Ensure x is an integer
            r, g, b = image_array[int(y), int(x)]  # Force integer indexing

            # Convert RGB to HSV manually
            max_val = max(r, g, b)
            min_val = min(r, g, b)
            delta = max_val - min_val

            # Compute Hue (H)
            if delta == 0:
                h = 0
            elif max_val == r:
                h = (60 * ((g - b) / delta) + 360) % 360
            elif max_val == g:
                h = (60 * ((b - r) / delta) + 120) % 360
            else:
                h = (60 * ((r - g) / delta) + 240) % 360

            # Compute Saturation (S)
            s = (delta / max_val) if max_val != 0 else 0
            s *= saturation_factor  # Apply saturation adjustment
            s = min(max(s, 0.0), 1.0)  # Clip saturation

            # Convert back to RGB
            if s == 0:
                r, g, b = max_val, max_val, max_val
            else:
                c = s * max_val
                x_val = c * (1 - abs((h / 60) % 2 - 1))
                m = max_val - c

                if 0 <= h < 60:
                    r, g, b = c, x_val, 0
                elif 60 <= h < 120:
                    r, g, b = x_val, c, 0
                elif 120 <= h < 180:
                    r, g, b = 0, c, x_val
                elif 180 <= h < 240:
                    r, g, b = 0, x_val, c
                elif 240 <= h < 300:
                    r, g, b = x_val, 0, c
                else:
                    r, g, b = c, 0, x_val

                r, g, b = r + m, g + m, b + m  # Add m to shift brightness

            # ✅ Fix: Explicitly cast indices to integers
            output[int(y), int(x), 0] = r
            output[int(y), int(x), 1] = g
            output[int(y), int(x), 2] = b

    return output


@njit(parallel=True, fastmath=True, cache=True)
def applySCNR_numba(image_array):
    height, width, _ = image_array.shape
    output = np.empty_like(image_array, dtype=np.float32)

    for y in prange(int(height)):
        for x in prange(int(width)):
            r, g, b = image_array[y, x]
            g = min(g, (r + b) / 2)  # Reduce green to the average of red & blue

            # ✅ Fix: Assign channels individually instead of a tuple
            output[int(y), int(x), 0] = r
            output[int(y), int(x), 1] = g
            output[int(y), int(x), 2] = b

    return output

# D65 reference
_Xn, _Yn, _Zn = 0.95047, 1.00000, 1.08883

# Matrix for RGB -> XYZ (sRGB => D65)
_M_rgb2xyz = np.array([
    [0.4124564, 0.3575761, 0.1804375],
    [0.2126729, 0.7151522, 0.0721750],
    [0.0193339, 0.1191920, 0.9503041]
], dtype=np.float32)

# Matrix for XYZ -> RGB (sRGB => D65)
_M_xyz2rgb = np.array([
    [ 3.2404542, -1.5371385, -0.4985314],
    [-0.9692660,  1.8760108,  0.0415560],
    [ 0.0556434, -0.2040259,  1.0572252]
], dtype=np.float32)

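# Usage sketch (hypothetical helper, not part of the package API). applyPixelMath_numba
# applies out = (3**a * v) / ((3**a - 1) * v + 1), which maps [0, 1] onto [0, 1] and
# lifts midtones for a > 0. The chained calls and values below are invented.
def _demo_pixel_math_and_scnr():
    rgb = np.random.rand(32, 32, 3).astype(np.float32)
    stretched = applyPixelMath_numba(rgb, 1.0)          # midtone stretch
    saturated = adjust_saturation_numba(stretched, 1.5)  # boost saturation by 1.5x
    green_neutral = applySCNR_numba(saturated)           # average-neutral green reduction
    return green_neutral
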
@njit(parallel=True, fastmath=True, cache=True)
def apply_lut_gray(image_in, lut):
    """
    Numba-accelerated application of 'lut' to a single-channel image_in in [0..1].
    'lut' is a 1D array of shape (size,) also in [0..1].
    """
    out = np.empty_like(image_in)
    height, width = image_in.shape
    size_lut = len(lut) - 1

    for y in prange(height):
        for x in range(width):
            v = image_in[y, x]
            idx = int(v * size_lut + 0.5)
            if idx < 0: idx = 0
            elif idx > size_lut: idx = size_lut
            out[y, x] = lut[idx]

    return out

@njit(parallel=True, fastmath=True, cache=True)
def apply_lut_color(image_in, lut):
    """
    Numba-accelerated application of 'lut' to a 3-channel image_in in [0..1].
    'lut' is a 1D array of shape (size,) also in [0..1].
    """
    out = np.empty_like(image_in)
    height, width, channels = image_in.shape
    size_lut = len(lut) - 1

    for y in prange(height):
        for x in range(width):
            for c in range(channels):
                v = image_in[y, x, c]
                idx = int(v * size_lut + 0.5)
                if idx < 0: idx = 0
                elif idx > size_lut: idx = size_lut
                out[y, x, c] = lut[idx]

    return out

@njit(parallel=True, fastmath=True, cache=True)
def apply_lut_mono_inplace(array2d, lut):
    """
    In-place LUT application on a single-channel 2D array in [0..1].
    'lut' has shape (size,) also in [0..1].
    """
    H, W = array2d.shape
    size_lut = len(lut) - 1
    for y in prange(H):
        for x in prange(W):
            v = array2d[y, x]
            idx = int(v * size_lut + 0.5)
            if idx < 0:
                idx = 0
            elif idx > size_lut:
                idx = size_lut
            array2d[y, x] = lut[idx]

@njit(parallel=True, fastmath=True, cache=True)
def apply_lut_color_inplace(array3d, lut):
    """
    In-place LUT application on a 3-channel array in [0..1].
    'lut' has shape (size,) also in [0..1].
    """
    H, W, C = array3d.shape
    size_lut = len(lut) - 1
    for y in prange(H):
        for x in prange(W):
            for c in range(C):
                v = array3d[y, x, c]
                idx = int(v * size_lut + 0.5)
                if idx < 0:
                    idx = 0
                elif idx > size_lut:
                    idx = size_lut
                array3d[y, x, c] = lut[idx]

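# Usage sketch (hypothetical helper, not part of the package API). The LUT kernels
# expect a 1D table in [0..1] and index it with round(v * (len(lut) - 1)); a gamma-2.2
# table is used here purely as an example curve.
def _demo_apply_lut():
    lut = np.linspace(0.0, 1.0, 65536, dtype=np.float32) ** (1.0 / 2.2)  # example gamma table
    mono = np.random.rand(32, 32).astype(np.float32)
    color = np.random.rand(32, 32, 3).astype(np.float32)
    out_mono = apply_lut_gray(mono, lut)       # returns a new array
    apply_lut_color_inplace(color, lut)        # modifies 'color' in place
    return out_mono, color
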
@njit(parallel=True, fastmath=True, cache=True)
def rgb_to_xyz_numba(rgb):
    """
    Convert an image from sRGB to XYZ (D65).
    rgb: float32 array in [0..1], shape (H,W,3)
    returns xyz in [0..maybe >1], shape (H,W,3)
    """
    H, W, _ = rgb.shape
    out = np.empty((H, W, 3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            r = rgb[y, x, 0]
            g = rgb[y, x, 1]
            b = rgb[y, x, 2]
            # Multiply by M_rgb2xyz
            X = _M_rgb2xyz[0,0]*r + _M_rgb2xyz[0,1]*g + _M_rgb2xyz[0,2]*b
            Y = _M_rgb2xyz[1,0]*r + _M_rgb2xyz[1,1]*g + _M_rgb2xyz[1,2]*b
            Z = _M_rgb2xyz[2,0]*r + _M_rgb2xyz[2,1]*g + _M_rgb2xyz[2,2]*b
            out[y, x, 0] = X
            out[y, x, 1] = Y
            out[y, x, 2] = Z
    return out

@njit(parallel=True, fastmath=True, cache=True)
def xyz_to_rgb_numba(xyz):
    """
    Convert an image from XYZ (D65) to sRGB.
    xyz: float32 array, shape (H,W,3)
    returns rgb in [0..1], shape (H,W,3)
    """
    H, W, _ = xyz.shape
    out = np.empty((H, W, 3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            X = xyz[y, x, 0]
            Y = xyz[y, x, 1]
            Z = xyz[y, x, 2]
            # Multiply by M_xyz2rgb
            r = _M_xyz2rgb[0,0]*X + _M_xyz2rgb[0,1]*Y + _M_xyz2rgb[0,2]*Z
            g = _M_xyz2rgb[1,0]*X + _M_xyz2rgb[1,1]*Y + _M_xyz2rgb[1,2]*Z
            b = _M_xyz2rgb[2,0]*X + _M_xyz2rgb[2,1]*Y + _M_xyz2rgb[2,2]*Z
            # Clip to [0..1]
            if r < 0: r = 0
            elif r > 1: r = 1
            if g < 0: g = 0
            elif g > 1: g = 1
            if b < 0: b = 0
            elif b > 1: b = 1
            out[y, x, 0] = r
            out[y, x, 1] = g
            out[y, x, 2] = b
    return out

@njit(cache=True)
def f_lab_numba(t):
    delta = 6/29
    out = np.empty_like(t, dtype=np.float32)
    for i in range(t.size):
        val = t.flat[i]
        if val > delta**3:
            out.flat[i] = val**(1/3)
        else:
            out.flat[i] = val/(3*delta*delta) + (4/29)
    return out

@njit(parallel=True, fastmath=True, cache=True)
def xyz_to_lab_numba(xyz):
    """
    xyz => shape(H,W,3), in D65.
    returns lab in shape(H,W,3): L in [0..100], a,b in ~[-128..127].
    """
    H, W, _ = xyz.shape
    out = np.empty((H,W,3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            X = xyz[y, x, 0] / _Xn
            Y = xyz[y, x, 1] / _Yn
            Z = xyz[y, x, 2] / _Zn
            fx = (X)**(1/3) if X > (6/29)**3 else X/(3*(6/29)**2) + 4/29
            fy = (Y)**(1/3) if Y > (6/29)**3 else Y/(3*(6/29)**2) + 4/29
            fz = (Z)**(1/3) if Z > (6/29)**3 else Z/(3*(6/29)**2) + 4/29
            L = 116*fy - 16
            a = 500*(fx - fy)
            b = 200*(fy - fz)
            out[y, x, 0] = L
            out[y, x, 1] = a
            out[y, x, 2] = b
    return out

@njit(parallel=True, fastmath=True, cache=True)
def lab_to_xyz_numba(lab):
    """
    lab => shape(H,W,3): L in [0..100], a,b in ~[-128..127].
    returns xyz shape(H,W,3).
    """
    H, W, _ = lab.shape
    out = np.empty((H,W,3), dtype=np.float32)
    delta = 6/29
    for y in prange(H):
        for x in prange(W):
            L = lab[y, x, 0]
            a = lab[y, x, 1]
            b = lab[y, x, 2]
            fy = (L+16)/116
            fx = fy + a/500
            fz = fy - b/200

            if fx > delta:
                xr = fx**3
            else:
                xr = 3*delta*delta*(fx - 4/29)
            if fy > delta:
                yr = fy**3
            else:
                yr = 3*delta*delta*(fy - 4/29)
            if fz > delta:
                zr = fz**3
            else:
                zr = 3*delta*delta*(fz - 4/29)

            X = _Xn * xr
            Y = _Yn * yr
            Z = _Zn * zr
            out[y, x, 0] = X
            out[y, x, 1] = Y
            out[y, x, 2] = Z
    return out

@njit(parallel=True, fastmath=True, cache=True)
def rgb_to_hsv_numba(rgb):
    H, W, _ = rgb.shape
    out = np.empty((H,W,3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            r = rgb[y,x,0]
            g = rgb[y,x,1]
            b = rgb[y,x,2]
            cmax = max(r,g,b)
            cmin = min(r,g,b)
            delta = cmax - cmin
            # Hue
            h = 0.0
            if delta != 0.0:
                if cmax == r:
                    h = 60*(((g-b)/delta) % 6)
                elif cmax == g:
                    h = 60*(((b-r)/delta) + 2)
                else:
                    h = 60*(((r-g)/delta) + 4)
            # Saturation
            s = 0.0
            if cmax > 0.0:
                s = delta / cmax
            v = cmax
            out[y,x,0] = h
            out[y,x,1] = s
            out[y,x,2] = v
    return out

@njit(parallel=True, fastmath=True, cache=True)
def hsv_to_rgb_numba(hsv):
    H, W, _ = hsv.shape
    out = np.empty((H,W,3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            h = hsv[y,x,0]
            s = hsv[y,x,1]
            v = hsv[y,x,2]
            c = v*s
            hh = (h/60.0) % 6
            x_ = c*(1 - abs(hh % 2 - 1))
            m = v - c
            r = 0.0
            g = 0.0
            b = 0.0
            if 0 <= hh < 1:
                r,g,b = c,x_,0
            elif 1 <= hh < 2:
                r,g,b = x_,c,0
            elif 2 <= hh < 3:
                r,g,b = 0,c,x_
            elif 3 <= hh < 4:
                r,g,b = 0,x_,c
            elif 4 <= hh < 5:
                r,g,b = x_,0,c
            else:
                r,g,b = c,0,x_
            out[y,x,0] = (r + m)
            out[y,x,1] = (g + m)
            out[y,x,2] = (b + m)
    return out

@njit(parallel=True, fastmath=True, cache=True)
def _cosmetic_correction_numba_fixed(corrected, H, W, C, hot_sigma, cold_sigma):
    """
    Optimized Numba-compiled local outlier correction.
    - Computes the median and a MAD-based standard deviation estimate from the 3x3 neighborhood around each pixel.
    - If the center pixel is greater than (median + hot_sigma * std_dev), it is replaced with the median.
    - If the center pixel is less than (median - cold_sigma * std_dev), it is replaced with the median.
    - Edge pixels are skipped (avoiding padding artifacts).
    """
    local_vals = np.empty(9, dtype=np.float32)  # Holds the 3x3 neighborhood (center pixel included)

    # Process pixels in parallel, skipping edges
    for y in prange(1, H - 1):  # Skip first and last rows
        for x in range(1, W - 1):  # Skip first and last columns
            # If the image is grayscale, set C=1 and handle accordingly
            for c_i in prange(C if corrected.ndim == 3 else 1):
                k = 0
                for dy in range(-1, 2):  # -1, 0, +1
                    for dx in range(-1, 2):  # -1, 0, +1
                        if corrected.ndim == 3:  # Color image
                            local_vals[k] = corrected[y + dy, x + dx, c_i]
                        else:  # Grayscale image
                            local_vals[k] = corrected[y + dy, x + dx]
                        k += 1

                # Compute median
                M = np.median(local_vals)

                # Compute MAD manually
                abs_devs = np.abs(local_vals - M)
                MAD = np.median(abs_devs)

                # Convert MAD to an approximation of standard deviation
                sigma_mad = 1.4826 * MAD

                # Get center pixel
                if corrected.ndim == 3:
                    T = corrected[y, x, c_i]
                else:
                    T = corrected[y, x]

                threshold_high = M + (hot_sigma * sigma_mad)
                threshold_low = M - (cold_sigma * sigma_mad)

                # **Apply correction ONLY if center pixel is an outlier**
                if T > threshold_high or T < threshold_low:
                    if corrected.ndim == 3:
                        corrected[y, x, c_i] = M  # Replace center pixel in color image
                    else:
                        corrected[y, x] = M  # Replace center pixel in grayscale image

def bulk_cosmetic_correction_bayer(image, hot_sigma=5.0, cold_sigma=5.0):
    """
    Perform cosmetic correction on a single-channel Bayer mosaic.
    Assumes a default Bayer pattern "RGGB":
      - Red: even rows, even columns
      - Green1: even rows, odd columns
      - Green2: odd rows, even columns
      - Blue: odd rows, odd columns
    Applies cosmetic correction separately on each channel and reassembles them.
    """
    H, W = image.shape
    # Create a copy to hold the corrected image.
    corrected = image.astype(np.float32).copy()

    # For each channel, extract the subarray and apply the standard correction.
    # We use the existing bulk_cosmetic_correction_numba function, which accepts a 2D array.
    # Red channel (even rows, even cols)
    red = corrected[0:H:2, 0:W:2]
    red_corrected = bulk_cosmetic_correction_numba(red, hot_sigma, cold_sigma)
    corrected[0:H:2, 0:W:2] = red_corrected

    # Blue channel (odd rows, odd cols)
    blue = corrected[1:H:2, 1:W:2]
    blue_corrected = bulk_cosmetic_correction_numba(blue, hot_sigma, cold_sigma)
    corrected[1:H:2, 1:W:2] = blue_corrected

    # Green channel: two sets:
    # Green1 (even rows, odd cols)
    green1 = corrected[0:H:2, 1:W:2]
    green1_corrected = bulk_cosmetic_correction_numba(green1, hot_sigma, cold_sigma)
    corrected[0:H:2, 1:W:2] = green1_corrected

    # Green2 (odd rows, even cols)
    green2 = corrected[1:H:2, 0:W:2]
    green2_corrected = bulk_cosmetic_correction_numba(green2, hot_sigma, cold_sigma)
    corrected[1:H:2, 0:W:2] = green2_corrected

    return corrected

def bulk_cosmetic_correction_numba(image, hot_sigma=3.0, cold_sigma=3.0, window_size=3):
    """
    Optimized local outlier correction using Numba.
    - Identifies hot and cold outliers based on local neighborhood statistics.
    - Uses the median and a MAD-based deviation from surrounding pixels to detect and replace outliers.
    - Applies separate hot_sigma and cold_sigma thresholds.
    - Skips edge pixels to avoid padding artifacts.
    """

    was_gray = False

    if image.ndim == 2:  # Convert grayscale to 3D
        H, W = image.shape
        C = 1
        was_gray = True
        image = image[:, :, np.newaxis]  # Explicitly add a color channel dimension

    else:
        H, W, C = image.shape

    # Copy the image for modification
    corrected = image.astype(np.float32).copy()

    # Apply fast correction (no padding, edges skipped)
    _cosmetic_correction_numba_fixed(corrected, H, W, C, hot_sigma, cold_sigma)

    if was_gray:
        corrected = corrected[:, :, 0]  # Convert back to 2D if originally grayscale

    return corrected

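# Usage sketch (hypothetical helper, not part of the package API). For OSC data the
# Bayer-aware variant corrects each CFA plane separately so hot-pixel replacement never
# mixes colors; the sigma values and synthetic hot pixel below are just example inputs.
def _demo_cosmetic_correction():
    mosaic = np.random.rand(64, 64).astype(np.float32)        # raw CFA frame
    mosaic[10, 10] = 10.0                                      # synthetic hot pixel
    fixed_cfa = bulk_cosmetic_correction_bayer(mosaic, hot_sigma=5.0, cold_sigma=5.0)
    rgb = np.random.rand(64, 64, 3).astype(np.float32)        # already-demosaiced image
    fixed_rgb = bulk_cosmetic_correction_numba(rgb, hot_sigma=3.0, cold_sigma=3.0)
    return fixed_cfa, fixed_rgb
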
def evaluate_polynomial(H: int, W: int, coeffs: np.ndarray, degree: int) -> np.ndarray:
    """
    Evaluates the polynomial function over the entire image domain.
    """
    xx, yy = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing="xy")
    A_full = build_poly_terms(xx.ravel(), yy.ravel(), degree)
    return (A_full @ coeffs).reshape(H, W)


@njit(parallel=True, fastmath=True, cache=True)
def numba_mono_final_formula(rescaled, median_rescaled, target_median):
    """
    Applies the final formula *after* we already have the rescaled values.

    rescaled[y,x] = (original[y,x] - black_point) / (1 - black_point)
    median_rescaled = median(rescaled)

    out_val = ((median_rescaled - 1) * target_median * r) /
              ( median_rescaled*(target_median + r - 1) - target_median*r )
    """
    H, W = rescaled.shape
    out = np.empty_like(rescaled)

    for y in prange(H):
        for x in range(W):
            r = rescaled[y, x]
            numer = (median_rescaled - 1.0) * target_median * r
            denom = median_rescaled * (target_median + r - 1.0) - target_median * r
            if np.abs(denom) < 1e-12:
                denom = 1e-12
            out[y, x] = numer / denom

    return out

@njit(parallel=True, fastmath=True)
def drizzle_deposit_numba_naive(image_data, affine_2x3, drizzle_buffer, coverage_buffer, scale, weight):
    """
    Naive drizzle deposit (point-to-point-ish) for Mono images.
    Maps input (x,y) -> output (u,v) via affine, deposits 'weight' at nearest integer pixel.

    image_data: (H, W)
    affine_2x3: (2, 3) matrix mapping source->canvas
    drizzle_buffer: (Ho, Wo)
    coverage_buffer: (Ho, Wo)
    """
    H, W = image_data.shape
    Ho, Wo = drizzle_buffer.shape

    # We iterate over source pixels
    for y in prange(H):
        for x in range(W):
            val = image_data[y, x]
            if val == 0:
                continue

            # Project center of pixel (x, y)
            # u = a*x + b*y + tx
            # v = c*x + d*y + ty
            u = affine_2x3[0, 0] * x + affine_2x3[0, 1] * y + affine_2x3[0, 2]
            v = affine_2x3[1, 0] * x + affine_2x3[1, 1] * y + affine_2x3[1, 2]

            # Nearest neighbor deposit
            ui = int(round(u))
            vi = int(round(v))

            if 0 <= ui < Wo and 0 <= vi < Ho:
                # Accumulate
                drizzle_buffer[vi, ui] += val * weight
                coverage_buffer[vi, ui] += weight

    return drizzle_buffer, coverage_buffer


@njit(parallel=True, fastmath=True)
def drizzle_deposit_color_naive(image_data, affine_2x3, drizzle_buffer, coverage_buffer, scale, drop_shrink, weight):
    """
    Naive drizzle deposit for Color images (H,W,C).
    image_data: (H, W, C)
    affine_2x3: (2, 3)
    drizzle_buffer: (Ho, Wo, C)
    coverage_buffer: (Ho, Wo, C)
    """
    H, W, C = image_data.shape
    Ho, Wo, _ = drizzle_buffer.shape

    for y in prange(H):
        for x in range(W):
            # Check if pixel has any data (simple check: if sum > 0 or checks per channel)
            # usually we just project.

            u = affine_2x3[0, 0] * x + affine_2x3[0, 1] * y + affine_2x3[0, 2]
            v = affine_2x3[1, 0] * x + affine_2x3[1, 1] * y + affine_2x3[1, 2]

            ui = int(round(u))
            vi = int(round(v))

            if 0 <= ui < Wo and 0 <= vi < Ho:
                for c in range(C):
                    val = image_data[y, x, c]
                    drizzle_buffer[vi, ui, c] += val * weight
                    coverage_buffer[vi, ui, c] += weight

    return drizzle_buffer, coverage_buffer

@njit(parallel=True, fastmath=True, cache=True)
|
|
2499
|
+
def numba_color_final_formula_linked(rescaled, median_rescaled, target_median):
|
|
2500
|
+
"""
|
|
2501
|
+
Linked color transform: we use one median_rescaled for all channels.
|
|
2502
|
+
rescaled: (H,W,3), already = (image - black_point)/(1 - black_point)
|
|
2503
|
+
median_rescaled = median of *all* pixels in rescaled
|
|
2504
|
+
"""
|
|
2505
|
+
H, W, C = rescaled.shape
|
|
2506
|
+
out = np.empty_like(rescaled)
|
|
2507
|
+
|
|
2508
|
+
for y in prange(H):
|
|
2509
|
+
for x in range(W):
|
|
2510
|
+
for c in range(C):
|
|
2511
|
+
r = rescaled[y, x, c]
|
|
2512
|
+
numer = (median_rescaled - 1.0) * target_median * r
|
|
2513
|
+
denom = median_rescaled * (target_median + r - 1.0) - target_median * r
|
|
2514
|
+
if np.abs(denom) < 1e-12:
|
|
2515
|
+
denom = 1e-12
|
|
2516
|
+
out[y, x, c] = numer / denom
|
|
2517
|
+
|
|
2518
|
+
return out
|
|
2519
|
+
|
|
2520
|
+
@njit(parallel=True, fastmath=True, cache=True)
|
|
2521
|
+
def numba_color_final_formula_unlinked(rescaled, medians_rescaled, target_median):
|
|
2522
|
+
"""
|
|
2523
|
+
Unlinked color transform: a separate median_rescaled per channel.
|
|
2524
|
+
rescaled: (H,W,3), where each channel is already (val - black_point[c]) / (1 - black_point[c])
|
|
2525
|
+
medians_rescaled: shape (3,) with median of each channel in the rescaled array.
|
|
2526
|
+
"""
|
|
2527
|
+
H, W, C = rescaled.shape
|
|
2528
|
+
out = np.empty_like(rescaled)
|
|
2529
|
+
|
|
2530
|
+
for y in prange(H):
|
|
2531
|
+
for x in range(W):
|
|
2532
|
+
for c in range(C):
|
|
2533
|
+
r = rescaled[y, x, c]
|
|
2534
|
+
med = medians_rescaled[c]
|
|
2535
|
+
numer = (med - 1.0) * target_median * r
|
|
2536
|
+
denom = med * (target_median + r - 1.0) - target_median * r
|
|
2537
|
+
if np.abs(denom) < 1e-12:
|
|
2538
|
+
denom = 1e-12
|
|
2539
|
+
out[y, x, c] = numer / denom
|
|
2540
|
+
|
|
2541
|
+
return out
|
|
2542
|
+
|
|
2543
|
+
|
|
2544
|
+
def build_poly_terms(x_array: np.ndarray, y_array: np.ndarray, degree: int) -> np.ndarray:
|
|
2545
|
+
"""
|
|
2546
|
+
Precomputes polynomial basis terms efficiently using NumPy, supporting up to degree 6.
|
|
2547
|
+
"""
|
|
2548
|
+
ones = np.ones_like(x_array, dtype=np.float32)
|
|
2549
|
+
|
|
2550
|
+
if degree == 1:
|
|
2551
|
+
return np.column_stack((ones, x_array, y_array))
|
|
2552
|
+
|
|
2553
|
+
elif degree == 2:
|
|
2554
|
+
return np.column_stack((ones, x_array, y_array,
|
|
2555
|
+
x_array**2, x_array * y_array, y_array**2))
|
|
2556
|
+
|
|
2557
|
+
elif degree == 3:
|
|
2558
|
+
return np.column_stack((ones, x_array, y_array,
|
|
2559
|
+
x_array**2, x_array * y_array, y_array**2,
|
|
2560
|
+
x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3))
|
|
2561
|
+
|
|
2562
|
+
elif degree == 4:
|
|
2563
|
+
return np.column_stack((ones, x_array, y_array,
|
|
2564
|
+
x_array**2, x_array * y_array, y_array**2,
|
|
2565
|
+
x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
|
|
2566
|
+
x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4))
|
|
2567
|
+
|
|
2568
|
+
elif degree == 5:
|
|
2569
|
+
return np.column_stack((ones, x_array, y_array,
|
|
2570
|
+
x_array**2, x_array * y_array, y_array**2,
|
|
2571
|
+
x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
|
|
2572
|
+
x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4,
|
|
2573
|
+
x_array**5, x_array**4 * y_array, x_array**3 * y_array**2, x_array**2 * y_array**3, x_array * y_array**4, y_array**5))
|
|
2574
|
+
|
|
2575
|
+
elif degree == 6:
|
|
2576
|
+
return np.column_stack((ones, x_array, y_array,
|
|
2577
|
+
x_array**2, x_array * y_array, y_array**2,
|
|
2578
|
+
x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
|
|
2579
|
+
x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4,
|
|
2580
|
+
x_array**5, x_array**4 * y_array, x_array**3 * y_array**2, x_array**2 * y_array**3, x_array * y_array**4, y_array**5,
|
|
2581
|
+
x_array**6, x_array**5 * y_array, x_array**4 * y_array**2, x_array**3 * y_array**3, x_array**2 * y_array**4, x_array * y_array**5, y_array**6))
|
|
2582
|
+
|
|
2583
|
+
else:
|
|
2584
|
+
raise ValueError(f"Unsupported polynomial degree={degree}. Max supported is 6.")
|
|
2585
|
+
|
|
2586
|
+
|
|
2587
|
+
|
|
2588
|
+
|
|
2589
|
+
def generate_sample_points(image: np.ndarray, num_points: int = 100) -> np.ndarray:
|
|
2590
|
+
"""
|
|
2591
|
+
Generates sample points uniformly across the image.
|
|
2592
|
+
|
|
2593
|
+
- Places points in a uniform grid (no randomization).
|
|
2594
|
+
- Avoids border pixels.
|
|
2595
|
+
- Skips any points with value 0.000 or above 0.85.
|
|
2596
|
+
|
|
2597
|
+
Returns:
|
|
2598
|
+
np.ndarray: Array of shape (N, 2) containing (x, y) coordinates of sample points.
|
|
2599
|
+
"""
|
|
2600
|
+
H, W = image.shape[:2]
|
|
2601
|
+
points = []
|
|
2602
|
+
|
|
2603
|
+
# Create a uniform grid (avoiding the border)
|
|
2604
|
+
grid_size = int(np.sqrt(num_points)) # Roughly equal spacing
|
|
2605
|
+
x_vals = np.linspace(10, W - 10, grid_size, dtype=int) # Avoids border
|
|
2606
|
+
y_vals = np.linspace(10, H - 10, grid_size, dtype=int)
|
|
2607
|
+
|
|
2608
|
+
for y in y_vals:
|
|
2609
|
+
for x in x_vals:
|
|
2610
|
+
# Skip values that are too dark (0.000) or too bright (> 0.85)
|
|
2611
|
+
if np.any(image[int(y), int(x)] == 0.000) or np.any(image[int(y), int(x)] > 0.85):
|
|
2612
|
+
continue # Skip this pixel
|
|
2613
|
+
|
|
2614
|
+
points.append((int(x), int(y)))
|
|
2615
|
+
|
|
2616
|
+
if len(points) >= num_points:
|
|
2617
|
+
return np.array(points, dtype=np.int32) # Return only valid points
|
|
2618
|
+
|
|
2619
|
+
return np.array(points, dtype=np.int32) # Return all collected points
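

# Usage sketch (illustrative only; _example_fit_background is hypothetical, not part of the
# package API). It ties the helpers together for a mono frame: sample the image on a grid,
# build the polynomial basis at those samples, solve for coefficients by least squares, then
# evaluate the model over the whole frame with evaluate_polynomial().
def _example_fit_background(image: np.ndarray, degree: int = 2) -> np.ndarray:
    pts = generate_sample_points(image, num_points=100)      # (N, 2) as (x, y)
    xs = pts[:, 0].astype(np.float32)
    ys = pts[:, 1].astype(np.float32)
    A = build_poly_terms(xs, ys, degree)                     # (N, n_terms) design matrix
    b = image[pts[:, 1], pts[:, 0]].astype(np.float32)      # sampled intensities (mono image assumed)
    coeffs, *_ = np.linalg.lstsq(A, b, rcond=None)
    H, W = image.shape[:2]
    return evaluate_polynomial(H, W, coeffs, degree)         # (H, W) background model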


@njit(parallel=True, fastmath=True, cache=True)
def numba_unstretch(image: np.ndarray, stretch_original_medians: np.ndarray, stretch_original_mins: np.ndarray) -> np.ndarray:
    """
    Numba-optimized function to undo the unlinked stretch.
    Restores each channel separately.
    """
    H, W, C = image.shape
    out = np.empty_like(image, dtype=np.float32)

    for c in prange(C):  # Parallelize per channel
        cmed_stretched = np.median(image[..., c])
        orig_med = stretch_original_medians[c]
        orig_min = stretch_original_mins[c]

        if cmed_stretched != 0 and orig_med != 0:
            for y in prange(H):
                for x in range(W):
                    r = image[y, x, c]
                    numerator = (cmed_stretched - 1) * orig_med * r
                    denominator = cmed_stretched * (orig_med + r - 1) - orig_med * r
                    if denominator == 0:
                        denominator = 1e-6  # Avoid division by zero
                    out[y, x, c] = numerator / denominator

        # Restore the original black point
        out[..., c] += orig_min

    return np.clip(out, 0, 1)  # Clip to valid range


@njit(fastmath=True, cache=True)
def drizzle_deposit_numba_naive(
    img_data,        # shape (H, W), mono
    transform,       # shape (2, 3), e.g. [[a,b,tx],[c,d,ty]]
    drizzle_buffer,  # shape (outH, outW)
    coverage_buffer, # shape (outH, outW)
    drizzle_factor: float,
    frame_weight: float
):
    """
    Naive deposit: each input pixel is mapped to exactly one output pixel,
    ignoring drop_shrink. 2D single-channel version (mono).
    """
    h, w = img_data.shape
    out_h, out_w = drizzle_buffer.shape

    # Build a 3×3 matrix M
    # transform is 2×3, so we expand to 3×3 for the standard [x, y, 1] approach
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]  # a
    M[0, 1] = transform[0, 1]  # b
    M[0, 2] = transform[0, 2]  # tx
    M[1, 0] = transform[1, 0]  # c
    M[1, 1] = transform[1, 1]  # d
    M[1, 2] = transform[1, 2]  # ty
    M[2, 2] = 1.0

    # We'll reuse a small input vector for each pixel
    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    for y in range(h):
        for x in range(w):
            val = img_data[y, x]
            if val == 0:
                continue

            # Fill the input vector
            in_coords[0] = x
            in_coords[1] = y

            # Multiply
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # Multiply by drizzle_factor
            Xo = int(X * drizzle_factor)
            Yo = int(Y * drizzle_factor)

            if 0 <= Xo < out_w and 0 <= Yo < out_h:
                drizzle_buffer[Yo, Xo] += val * frame_weight
                coverage_buffer[Yo, Xo] += frame_weight

    return drizzle_buffer, coverage_buffer


@njit(fastmath=True, cache=True)
def drizzle_deposit_numba_footprint(
    img_data,        # shape (H, W), mono
    transform,       # shape (2, 3)
    drizzle_buffer,  # shape (outH, outW)
    coverage_buffer, # shape (outH, outW)
    drizzle_factor: float,
    drop_shrink: float,
    frame_weight: float
):
    """
    Distributes each input pixel over a bounding box of width=drop_shrink
    in the drizzle (out) plane. (Mono 2D version)
    """
    h, w = img_data.shape
    out_h, out_w = drizzle_buffer.shape

    # Build a 3×3 matrix M
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]  # a
    M[0, 1] = transform[0, 1]  # b
    M[0, 2] = transform[0, 2]  # tx
    M[1, 0] = transform[1, 0]  # c
    M[1, 1] = transform[1, 1]  # d
    M[1, 2] = transform[1, 2]  # ty
    M[2, 2] = 1.0

    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    footprint_radius = drop_shrink * 0.5

    for y in range(h):
        for x in range(w):
            val = img_data[y, x]
            if val == 0:
                continue

            # Transform to output coords
            in_coords[0] = x
            in_coords[1] = y
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # Upsample
            Xo = X * drizzle_factor
            Yo = Y * drizzle_factor

            # bounding box
            min_x = int(np.floor(Xo - footprint_radius))
            max_x = int(np.floor(Xo + footprint_radius))
            min_y = int(np.floor(Yo - footprint_radius))
            max_y = int(np.floor(Yo + footprint_radius))

            # clip
            if max_x < 0 or min_x >= out_w or max_y < 0 or min_y >= out_h:
                continue
            if min_x < 0:
                min_x = 0
            if max_x >= out_w:
                max_x = out_w - 1
            if min_y < 0:
                min_y = 0
            if max_y >= out_h:
                max_y = out_h - 1

            width_foot = (max_x - min_x + 1)
            height_foot = (max_y - min_y + 1)
            area_pixels = width_foot * height_foot
            if area_pixels <= 0:
                continue

            deposit_val = (val * frame_weight) / area_pixels
            coverage_fraction = frame_weight / area_pixels

            for oy in range(min_y, max_y + 1):
                for ox in range(min_x, max_x + 1):
                    drizzle_buffer[oy, ox] += deposit_val
                    coverage_buffer[oy, ox] += coverage_fraction

    return drizzle_buffer, coverage_buffer


@njit(parallel=True, cache=True)
def finalize_drizzle_2d(drizzle_buffer, coverage_buffer, final_out):
    """
    Parallel-friendly final step: final_out = drizzle_buffer / coverage_buffer,
    with coverage < 1e-8 => 0
    """
    out_h, out_w = drizzle_buffer.shape
    for y in prange(out_h):
        for x in range(out_w):
            cov = coverage_buffer[y, x]
            if cov < 1e-8:
                final_out[y, x] = 0.0
            else:
                final_out[y, x] = drizzle_buffer[y, x] / cov
    return final_out
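

# Usage sketch (illustrative only; _example_drizzle_mono is hypothetical, not part of the
# package API). Note that drizzle_deposit_numba_naive is defined twice in this module; the
# later (serial, drizzle_factor/frame_weight) definition above is the one bound to the name
# at import time. A single-frame deposit plus normalization, assuming a float32 mono frame,
# an identity 2x3 alignment transform and a 2x drizzle scale, could look like this:
def _example_drizzle_mono(frame: np.ndarray, drizzle_factor: float = 2.0) -> np.ndarray:
    h, w = frame.shape
    out_h, out_w = int(h * drizzle_factor), int(w * drizzle_factor)
    transform = np.array([[1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0]], dtype=np.float32)   # identity alignment
    drizzle_buf = np.zeros((out_h, out_w), dtype=np.float32)
    coverage_buf = np.zeros((out_h, out_w), dtype=np.float32)
    drizzle_deposit_numba_naive(frame, transform, drizzle_buf, coverage_buf,
                                drizzle_factor, 1.0)
    final = np.empty((out_h, out_w), dtype=np.float32)
    return finalize_drizzle_2d(drizzle_buf, coverage_buf, final)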


@njit(fastmath=True, cache=True)
def drizzle_deposit_color_naive(
    img_data,         # shape (H,W,C)
    transform,        # shape (2,3)
    drizzle_buffer,   # shape (outH,outW,C)
    coverage_buffer,  # shape (outH,outW,C)
    drizzle_factor: float,
    drop_shrink: float,  # unused here
    frame_weight: float
):
    """
    Naive color deposit:
    Each input pixel is mapped to exactly one output pixel (ignores drop_shrink).
    """
    H, W, channels = img_data.shape
    outH, outW, outC = drizzle_buffer.shape

    # Build 3×3 matrix M
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]
    M[0, 1] = transform[0, 1]
    M[0, 2] = transform[0, 2]
    M[1, 0] = transform[1, 0]
    M[1, 1] = transform[1, 1]
    M[1, 2] = transform[1, 2]
    M[2, 2] = 1.0

    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    for y in range(H):
        for x in range(W):
            # 1) Transform
            in_coords[0] = x
            in_coords[1] = y
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # 2) Upsample
            Xo = int(X * drizzle_factor)
            Yo = int(Y * drizzle_factor)

            # 3) Check bounds
            if 0 <= Xo < outW and 0 <= Yo < outH:
                # 4) For each channel
                for cidx in range(channels):
                    val = img_data[y, x, cidx]
                    if val != 0:
                        drizzle_buffer[Yo, Xo, cidx] += val * frame_weight
                        coverage_buffer[Yo, Xo, cidx] += frame_weight

    return drizzle_buffer, coverage_buffer


@njit(fastmath=True, cache=True)
def drizzle_deposit_color_footprint(
    img_data,         # shape (H,W,C)
    transform,        # shape (2,3)
    drizzle_buffer,   # shape (outH,outW,C)
    coverage_buffer,  # shape (outH,outW,C)
    drizzle_factor: float,
    drop_shrink: float,
    frame_weight: float
):
    """
    Color version with a bounding-box footprint of width=drop_shrink
    for distributing flux in the output plane.
    """
    H, W, channels = img_data.shape
    outH, outW, outC = drizzle_buffer.shape

    # Build 3×3 matrix
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]
    M[0, 1] = transform[0, 1]
    M[0, 2] = transform[0, 2]
    M[1, 0] = transform[1, 0]
    M[1, 1] = transform[1, 1]
    M[1, 2] = transform[1, 2]
    M[2, 2] = 1.0

    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    footprint_radius = drop_shrink * 0.5

    for y in range(H):
        for x in range(W):
            # Transform once per pixel
            in_coords[0] = x
            in_coords[1] = y
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # Upsample
            Xo = X * drizzle_factor
            Yo = Y * drizzle_factor

            # bounding box
            min_x = int(np.floor(Xo - footprint_radius))
            max_x = int(np.floor(Xo + footprint_radius))
            min_y = int(np.floor(Yo - footprint_radius))
            max_y = int(np.floor(Yo + footprint_radius))

            if max_x < 0 or min_x >= outW or max_y < 0 or min_y >= outH:
                continue
            if min_x < 0:
                min_x = 0
            if max_x >= outW:
                max_x = outW - 1
            if min_y < 0:
                min_y = 0
            if max_y >= outH:
                max_y = outH - 1

            width_foot = (max_x - min_x + 1)
            height_foot = (max_y - min_y + 1)
            area_pixels = width_foot * height_foot
            if area_pixels <= 0:
                continue

            for cidx in range(channels):
                val = img_data[y, x, cidx]
                if val == 0:
                    continue

                deposit_val = (val * frame_weight) / area_pixels
                coverage_fraction = frame_weight / area_pixels

                for oy in range(min_y, max_y + 1):
                    for ox in range(min_x, max_x + 1):
                        drizzle_buffer[oy, ox, cidx] += deposit_val
                        coverage_buffer[oy, ox, cidx] += coverage_fraction

    return drizzle_buffer, coverage_buffer


@njit(cache=True)
def finalize_drizzle_3d(drizzle_buffer, coverage_buffer, final_out):
    """
    final_out[y,x,c] = drizzle_buffer[y,x,c] / coverage_buffer[y,x,c]
    if coverage < 1e-8 => 0
    """
    outH, outW, channels = drizzle_buffer.shape
    for y in range(outH):
        for x in range(outW):
            for cidx in range(channels):
                cov = coverage_buffer[y, x, cidx]
                if cov < 1e-8:
                    final_out[y, x, cidx] = 0.0
                else:
                    final_out[y, x, cidx] = drizzle_buffer[y, x, cidx] / cov
    return final_out


@njit(cache=True)
def piecewise_linear(val, xvals, yvals):
    """
    Performs piecewise linear interpolation:
    Given a scalar 'val', and arrays xvals, yvals (each of length N),
    finds i s.t. xvals[i] <= val < xvals[i+1],
    then returns the linear interpolation between yvals[i], yvals[i+1].
    If val < xvals[0], returns yvals[0].
    If val > xvals[-1], returns yvals[-1].
    """
    if val <= xvals[0]:
        return yvals[0]
    for i in range(len(xvals) - 1):
        if val < xvals[i + 1]:
            # Perform a linear interpolation in interval [xvals[i], xvals[i+1]]
            dx = xvals[i + 1] - xvals[i]
            dy = yvals[i + 1] - yvals[i]
            ratio = (val - xvals[i]) / dx
            return yvals[i] + ratio * dy
    return yvals[-1]


@njit(parallel=True, fastmath=True, cache=True)
def apply_curves_numba(image, xvals, yvals):
    """
    Numba-accelerated routine to apply piecewise linear interpolation
    to each pixel in 'image'.
    - image can be (H,W) or (H,W,3).
    - xvals, yvals are the curve arrays in ascending order.
    Returns the adjusted image as float32.
    """
    if image.ndim == 2:
        H, W = image.shape
        out = np.empty((H, W), dtype=np.float32)
        for y in prange(H):
            for x in range(W):
                val = image[y, x]
                out[y, x] = piecewise_linear(val, xvals, yvals)
        return out
    elif image.ndim == 3:
        H, W, C = image.shape
        out = np.empty((H, W, C), dtype=np.float32)
        for y in prange(H):
            for x in range(W):
                for c in range(C):
                    val = image[y, x, c]
                    out[y, x, c] = piecewise_linear(val, xvals, yvals)
        return out
    else:
        # Unexpected shape
        return image  # Fallback
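

# Usage sketch (illustrative only; the control points below are arbitrary). A "curve" here is
# just two ascending arrays of control points; apply_curves_numba() pushes every pixel through
# piecewise_linear(). This example lifts the midtones of a [0, 1] float32 image.
def _example_midtone_lift(image: np.ndarray) -> np.ndarray:
    xvals = np.array([0.0, 0.25, 0.5, 0.75, 1.0], dtype=np.float32)
    yvals = np.array([0.0, 0.35, 0.65, 0.85, 1.0], dtype=np.float32)
    return apply_curves_numba(image.astype(np.float32), xvals, yvals)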


def fast_star_detect(image,
                     blur_size=9,
                     threshold_factor=0.7,
                     min_area=1,
                     max_area=5000):
    """
    Finds star positions via contour detection + ellipse fitting.
    Returns Nx2 array of (x, y) star coordinates in the same coordinate system as 'image'.
    """

    # 1) Convert to grayscale if needed
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # 2) Normalize to 8-bit [0..255]
    img_min, img_max = image.min(), image.max()
    if img_max <= img_min:
        return np.empty((0, 2), dtype=np.float32)  # All pixels same => no stars
    image_8u = (255.0 * (image - img_min) / (img_max - img_min)).astype(np.uint8)

    # 3) Blur => subtract => highlight stars
    blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
    subtracted = cv2.absdiff(image_8u, blurred)

    # 4) Otsu's threshold => scaled by threshold_factor
    otsu_thresh, _ = cv2.threshold(subtracted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    final_thresh_val = max(2, int(otsu_thresh * threshold_factor))

    _, thresh = cv2.threshold(subtracted, final_thresh_val, 255, cv2.THRESH_BINARY)

    # 5) (Optional) morphological opening to remove single-pixel noise
    kernel = np.ones((2, 2), np.uint8)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

    # 6) Find contours
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # 7) Filter by area, fit ellipse => use ellipse center as star position
    star_positions = []
    for c in contours:
        area = cv2.contourArea(c)
        if area < min_area or area > max_area:
            continue
        if len(c) < 5:
            # Need >=5 points to fit an ellipse
            continue

        ellipse = cv2.fitEllipse(c)
        (cx, cy), (major_axis, minor_axis), angle = ellipse
        # You could check eccentricity, etc. if you want to filter out weird shapes
        star_positions.append((cx, cy))

    if len(star_positions) == 0:
        return np.empty((0, 2), dtype=np.float32)
    else:
        return np.array(star_positions, dtype=np.float32)
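

# Usage sketch (illustrative only; 'aligned_frame' is a hypothetical caller-supplied array).
# fast_star_detect() normalizes internally, so a float image in [0, 1] can be passed as-is;
# the result is an (N, 2) float32 array of (x, y) star centers:
#
#     star_xy = fast_star_detect(aligned_frame, blur_size=9, threshold_factor=0.7)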


@njit(fastmath=True, cache=True)
def gradient_descent_to_dim_spot_numba(gray_small, start_x, start_y, patch_size):
    """
    Numba implementation of _gradient_descent_to_dim_spot.
    Walks to the local minimum (median-of-patch) around (start_x, start_y).
    gray_small: 2D float32 array
    """
    H, W = gray_small.shape
    half = patch_size // 2

    cx = int(min(max(start_x, 0), W - 1))
    cy = int(min(max(start_y, 0), H - 1))

    # Helper to compute patch median manually or efficiently
    # Numba supports np.median on arrays, but slicing inside a loop can be costly.
    # However, for small patches (e.g. 15x15), it should be okay.

    for _ in range(60):
        # Current value
        x0 = max(0, cx - half)
        x1 = min(W, cx + half + 1)
        y0 = max(0, cy - half)
        y1 = min(H, cy + half + 1)
        sub = gray_small[y0:y1, x0:x1].flatten()
        if sub.size == 0:
            cur_val = 1e9  # Should not happen
        else:
            cur_val = np.median(sub)

        best_x, best_y = cx, cy
        best_val = cur_val

        # 3x3 search
        changed = False

        # Unroll for strict 3x3 neighborhood
        for dy in range(-1, 2):
            for dx in range(-1, 2):
                if dx == 0 and dy == 0:
                    continue

                nx = cx + dx
                ny = cy + dy

                if nx < 0 or ny < 0 or nx >= W or ny >= H:
                    continue

                # Compute median for neighbor
                nx0 = max(0, nx - half)
                nx1 = min(W, nx + half + 1)
                ny0 = max(0, ny - half)
                ny1 = min(H, ny + half + 1)

                # In Numba, median on a slice creates a copy.
                # For small patches this is acceptable given the huge speedup vs Python interpreter overhead.
                n_sub = gray_small[ny0:ny1, nx0:nx1].flatten()
                if n_sub.size == 0:
                    val = 1e9
                else:
                    val = np.median(n_sub)

                if val < best_val:
                    best_val = val
                    best_x = nx
                    best_y = ny
                    changed = True

        if not changed:
            break

        cx, cy = best_x, best_y

    return cx, cy
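

# Usage sketch (illustrative only; the ramp image is hypothetical). Starting from the center
# of a left-to-right brightness ramp, the walk above should head toward the darker left edge,
# stopping after at most 60 steps or at a local minimum of the patch median.
def _example_walk_to_dim_spot():
    ramp = np.tile(np.linspace(0.0, 1.0, 64, dtype=np.float32), (64, 1))  # darkest at x = 0
    return gradient_descent_to_dim_spot_numba(ramp, 32, 32, 15)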