setiastrosuitepro 1.6.2.post1 (setiastrosuitepro-1.6.2.post1-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of setiastrosuitepro might be problematic.
- setiastro/__init__.py +2 -0
- setiastro/data/SASP_data.fits +0 -0
- setiastro/data/catalogs/List_of_Galaxies_with_Distances_Gly.csv +488 -0
- setiastro/data/catalogs/astrobin_filters.csv +890 -0
- setiastro/data/catalogs/astrobin_filters_page1_local.csv +51 -0
- setiastro/data/catalogs/cali2.csv +63 -0
- setiastro/data/catalogs/cali2color.csv +65 -0
- setiastro/data/catalogs/celestial_catalog - original.csv +16471 -0
- setiastro/data/catalogs/celestial_catalog.csv +24031 -0
- setiastro/data/catalogs/detected_stars.csv +24784 -0
- setiastro/data/catalogs/fits_header_data.csv +46 -0
- setiastro/data/catalogs/test.csv +8 -0
- setiastro/data/catalogs/updated_celestial_catalog.csv +16471 -0
- setiastro/images/Astro_Spikes.png +0 -0
- setiastro/images/Background_startup.jpg +0 -0
- setiastro/images/HRDiagram.png +0 -0
- setiastro/images/LExtract.png +0 -0
- setiastro/images/LInsert.png +0 -0
- setiastro/images/Oxygenation-atm-2.svg.png +0 -0
- setiastro/images/RGB080604.png +0 -0
- setiastro/images/abeicon.png +0 -0
- setiastro/images/aberration.png +0 -0
- setiastro/images/andromedatry.png +0 -0
- setiastro/images/andromedatry_satellited.png +0 -0
- setiastro/images/annotated.png +0 -0
- setiastro/images/aperture.png +0 -0
- setiastro/images/astrosuite.ico +0 -0
- setiastro/images/astrosuite.png +0 -0
- setiastro/images/astrosuitepro.icns +0 -0
- setiastro/images/astrosuitepro.ico +0 -0
- setiastro/images/astrosuitepro.png +0 -0
- setiastro/images/background.png +0 -0
- setiastro/images/background2.png +0 -0
- setiastro/images/benchmark.png +0 -0
- setiastro/images/big_moon_stabilizer_timeline.png +0 -0
- setiastro/images/big_moon_stabilizer_timeline_clean.png +0 -0
- setiastro/images/blaster.png +0 -0
- setiastro/images/blink.png +0 -0
- setiastro/images/clahe.png +0 -0
- setiastro/images/collage.png +0 -0
- setiastro/images/colorwheel.png +0 -0
- setiastro/images/contsub.png +0 -0
- setiastro/images/convo.png +0 -0
- setiastro/images/copyslot.png +0 -0
- setiastro/images/cosmic.png +0 -0
- setiastro/images/cosmicsat.png +0 -0
- setiastro/images/crop1.png +0 -0
- setiastro/images/cropicon.png +0 -0
- setiastro/images/curves.png +0 -0
- setiastro/images/cvs.png +0 -0
- setiastro/images/debayer.png +0 -0
- setiastro/images/denoise_cnn_custom.png +0 -0
- setiastro/images/denoise_cnn_graph.png +0 -0
- setiastro/images/disk.png +0 -0
- setiastro/images/dse.png +0 -0
- setiastro/images/exoicon.png +0 -0
- setiastro/images/eye.png +0 -0
- setiastro/images/fliphorizontal.png +0 -0
- setiastro/images/flipvertical.png +0 -0
- setiastro/images/font.png +0 -0
- setiastro/images/freqsep.png +0 -0
- setiastro/images/functionbundle.png +0 -0
- setiastro/images/graxpert.png +0 -0
- setiastro/images/green.png +0 -0
- setiastro/images/gridicon.png +0 -0
- setiastro/images/halo.png +0 -0
- setiastro/images/hdr.png +0 -0
- setiastro/images/histogram.png +0 -0
- setiastro/images/hubble.png +0 -0
- setiastro/images/imagecombine.png +0 -0
- setiastro/images/invert.png +0 -0
- setiastro/images/isophote.png +0 -0
- setiastro/images/isophote_demo_figure.png +0 -0
- setiastro/images/isophote_demo_image.png +0 -0
- setiastro/images/isophote_demo_model.png +0 -0
- setiastro/images/isophote_demo_residual.png +0 -0
- setiastro/images/jwstpupil.png +0 -0
- setiastro/images/linearfit.png +0 -0
- setiastro/images/livestacking.png +0 -0
- setiastro/images/mask.png +0 -0
- setiastro/images/maskapply.png +0 -0
- setiastro/images/maskcreate.png +0 -0
- setiastro/images/maskremove.png +0 -0
- setiastro/images/morpho.png +0 -0
- setiastro/images/mosaic.png +0 -0
- setiastro/images/multiscale_decomp.png +0 -0
- setiastro/images/nbtorgb.png +0 -0
- setiastro/images/neutral.png +0 -0
- setiastro/images/nuke.png +0 -0
- setiastro/images/openfile.png +0 -0
- setiastro/images/pedestal.png +0 -0
- setiastro/images/pen.png +0 -0
- setiastro/images/pixelmath.png +0 -0
- setiastro/images/platesolve.png +0 -0
- setiastro/images/ppp.png +0 -0
- setiastro/images/pro.png +0 -0
- setiastro/images/project.png +0 -0
- setiastro/images/psf.png +0 -0
- setiastro/images/redo.png +0 -0
- setiastro/images/redoicon.png +0 -0
- setiastro/images/rescale.png +0 -0
- setiastro/images/rgbalign.png +0 -0
- setiastro/images/rgbcombo.png +0 -0
- setiastro/images/rgbextract.png +0 -0
- setiastro/images/rotate180.png +0 -0
- setiastro/images/rotateclockwise.png +0 -0
- setiastro/images/rotatecounterclockwise.png +0 -0
- setiastro/images/satellite.png +0 -0
- setiastro/images/script.png +0 -0
- setiastro/images/selectivecolor.png +0 -0
- setiastro/images/simbad.png +0 -0
- setiastro/images/slot0.png +0 -0
- setiastro/images/slot1.png +0 -0
- setiastro/images/slot2.png +0 -0
- setiastro/images/slot3.png +0 -0
- setiastro/images/slot4.png +0 -0
- setiastro/images/slot5.png +0 -0
- setiastro/images/slot6.png +0 -0
- setiastro/images/slot7.png +0 -0
- setiastro/images/slot8.png +0 -0
- setiastro/images/slot9.png +0 -0
- setiastro/images/spcc.png +0 -0
- setiastro/images/spin_precession_vs_lunar_distance.png +0 -0
- setiastro/images/spinner.gif +0 -0
- setiastro/images/stacking.png +0 -0
- setiastro/images/staradd.png +0 -0
- setiastro/images/staralign.png +0 -0
- setiastro/images/starnet.png +0 -0
- setiastro/images/starregistration.png +0 -0
- setiastro/images/starspike.png +0 -0
- setiastro/images/starstretch.png +0 -0
- setiastro/images/statstretch.png +0 -0
- setiastro/images/supernova.png +0 -0
- setiastro/images/uhs.png +0 -0
- setiastro/images/undoicon.png +0 -0
- setiastro/images/upscale.png +0 -0
- setiastro/images/viewbundle.png +0 -0
- setiastro/images/whitebalance.png +0 -0
- setiastro/images/wimi_icon_256x256.png +0 -0
- setiastro/images/wimilogo.png +0 -0
- setiastro/images/wims.png +0 -0
- setiastro/images/wrench_icon.png +0 -0
- setiastro/images/xisfliberator.png +0 -0
- setiastro/qml/ResourceMonitor.qml +126 -0
- setiastro/saspro/__init__.py +20 -0
- setiastro/saspro/__main__.py +945 -0
- setiastro/saspro/_generated/__init__.py +7 -0
- setiastro/saspro/_generated/build_info.py +3 -0
- setiastro/saspro/abe.py +1346 -0
- setiastro/saspro/abe_preset.py +196 -0
- setiastro/saspro/aberration_ai.py +694 -0
- setiastro/saspro/aberration_ai_preset.py +224 -0
- setiastro/saspro/accel_installer.py +218 -0
- setiastro/saspro/accel_workers.py +30 -0
- setiastro/saspro/add_stars.py +624 -0
- setiastro/saspro/astrobin_exporter.py +1010 -0
- setiastro/saspro/astrospike.py +153 -0
- setiastro/saspro/astrospike_python.py +1841 -0
- setiastro/saspro/autostretch.py +198 -0
- setiastro/saspro/backgroundneutral.py +602 -0
- setiastro/saspro/batch_convert.py +328 -0
- setiastro/saspro/batch_renamer.py +522 -0
- setiastro/saspro/blemish_blaster.py +491 -0
- setiastro/saspro/blink_comparator_pro.py +2926 -0
- setiastro/saspro/bundles.py +61 -0
- setiastro/saspro/bundles_dock.py +114 -0
- setiastro/saspro/cheat_sheet.py +213 -0
- setiastro/saspro/clahe.py +368 -0
- setiastro/saspro/comet_stacking.py +1442 -0
- setiastro/saspro/common_tr.py +107 -0
- setiastro/saspro/config.py +38 -0
- setiastro/saspro/config_bootstrap.py +40 -0
- setiastro/saspro/config_manager.py +316 -0
- setiastro/saspro/continuum_subtract.py +1617 -0
- setiastro/saspro/convo.py +1400 -0
- setiastro/saspro/convo_preset.py +414 -0
- setiastro/saspro/copyastro.py +190 -0
- setiastro/saspro/cosmicclarity.py +1589 -0
- setiastro/saspro/cosmicclarity_preset.py +407 -0
- setiastro/saspro/crop_dialog_pro.py +973 -0
- setiastro/saspro/crop_preset.py +189 -0
- setiastro/saspro/curve_editor_pro.py +2562 -0
- setiastro/saspro/curves_preset.py +375 -0
- setiastro/saspro/debayer.py +673 -0
- setiastro/saspro/debug_utils.py +29 -0
- setiastro/saspro/dnd_mime.py +35 -0
- setiastro/saspro/doc_manager.py +2664 -0
- setiastro/saspro/exoplanet_detector.py +2166 -0
- setiastro/saspro/file_utils.py +284 -0
- setiastro/saspro/fitsmodifier.py +748 -0
- setiastro/saspro/fix_bom.py +32 -0
- setiastro/saspro/free_torch_memory.py +48 -0
- setiastro/saspro/frequency_separation.py +1349 -0
- setiastro/saspro/function_bundle.py +1596 -0
- setiastro/saspro/generate_translations.py +3092 -0
- setiastro/saspro/ghs_dialog_pro.py +663 -0
- setiastro/saspro/ghs_preset.py +284 -0
- setiastro/saspro/graxpert.py +637 -0
- setiastro/saspro/graxpert_preset.py +287 -0
- setiastro/saspro/gui/__init__.py +0 -0
- setiastro/saspro/gui/main_window.py +8810 -0
- setiastro/saspro/gui/mixins/__init__.py +33 -0
- setiastro/saspro/gui/mixins/dock_mixin.py +362 -0
- setiastro/saspro/gui/mixins/file_mixin.py +450 -0
- setiastro/saspro/gui/mixins/geometry_mixin.py +403 -0
- setiastro/saspro/gui/mixins/header_mixin.py +441 -0
- setiastro/saspro/gui/mixins/mask_mixin.py +421 -0
- setiastro/saspro/gui/mixins/menu_mixin.py +389 -0
- setiastro/saspro/gui/mixins/theme_mixin.py +367 -0
- setiastro/saspro/gui/mixins/toolbar_mixin.py +1457 -0
- setiastro/saspro/gui/mixins/update_mixin.py +309 -0
- setiastro/saspro/gui/mixins/view_mixin.py +435 -0
- setiastro/saspro/gui/statistics_dialog.py +47 -0
- setiastro/saspro/halobgon.py +488 -0
- setiastro/saspro/header_viewer.py +448 -0
- setiastro/saspro/headless_utils.py +88 -0
- setiastro/saspro/histogram.py +756 -0
- setiastro/saspro/history_explorer.py +941 -0
- setiastro/saspro/i18n.py +168 -0
- setiastro/saspro/image_combine.py +417 -0
- setiastro/saspro/image_peeker_pro.py +1604 -0
- setiastro/saspro/imageops/__init__.py +37 -0
- setiastro/saspro/imageops/mdi_snap.py +292 -0
- setiastro/saspro/imageops/scnr.py +36 -0
- setiastro/saspro/imageops/starbasedwhitebalance.py +210 -0
- setiastro/saspro/imageops/stretch.py +236 -0
- setiastro/saspro/isophote.py +1182 -0
- setiastro/saspro/layers.py +208 -0
- setiastro/saspro/layers_dock.py +714 -0
- setiastro/saspro/lazy_imports.py +193 -0
- setiastro/saspro/legacy/__init__.py +2 -0
- setiastro/saspro/legacy/image_manager.py +2226 -0
- setiastro/saspro/legacy/numba_utils.py +3676 -0
- setiastro/saspro/legacy/xisf.py +1071 -0
- setiastro/saspro/linear_fit.py +537 -0
- setiastro/saspro/live_stacking.py +1841 -0
- setiastro/saspro/log_bus.py +5 -0
- setiastro/saspro/logging_config.py +460 -0
- setiastro/saspro/luminancerecombine.py +309 -0
- setiastro/saspro/main_helpers.py +201 -0
- setiastro/saspro/mask_creation.py +931 -0
- setiastro/saspro/masks_core.py +56 -0
- setiastro/saspro/mdi_widgets.py +353 -0
- setiastro/saspro/memory_utils.py +666 -0
- setiastro/saspro/metadata_patcher.py +75 -0
- setiastro/saspro/mfdeconv.py +3831 -0
- setiastro/saspro/mfdeconv_earlystop.py +71 -0
- setiastro/saspro/mfdeconvcudnn.py +3263 -0
- setiastro/saspro/mfdeconvsport.py +2382 -0
- setiastro/saspro/minorbodycatalog.py +567 -0
- setiastro/saspro/morphology.py +407 -0
- setiastro/saspro/multiscale_decomp.py +1293 -0
- setiastro/saspro/nbtorgb_stars.py +541 -0
- setiastro/saspro/numba_utils.py +3145 -0
- setiastro/saspro/numba_warmup.py +141 -0
- setiastro/saspro/ops/__init__.py +9 -0
- setiastro/saspro/ops/command_help_dialog.py +623 -0
- setiastro/saspro/ops/command_runner.py +217 -0
- setiastro/saspro/ops/commands.py +1594 -0
- setiastro/saspro/ops/script_editor.py +1102 -0
- setiastro/saspro/ops/scripts.py +1473 -0
- setiastro/saspro/ops/settings.py +637 -0
- setiastro/saspro/parallel_utils.py +554 -0
- setiastro/saspro/pedestal.py +121 -0
- setiastro/saspro/perfect_palette_picker.py +1071 -0
- setiastro/saspro/pipeline.py +110 -0
- setiastro/saspro/pixelmath.py +1604 -0
- setiastro/saspro/plate_solver.py +2445 -0
- setiastro/saspro/project_io.py +797 -0
- setiastro/saspro/psf_utils.py +136 -0
- setiastro/saspro/psf_viewer.py +549 -0
- setiastro/saspro/pyi_rthook_astroquery.py +95 -0
- setiastro/saspro/remove_green.py +331 -0
- setiastro/saspro/remove_stars.py +1599 -0
- setiastro/saspro/remove_stars_preset.py +404 -0
- setiastro/saspro/resources.py +501 -0
- setiastro/saspro/rgb_combination.py +208 -0
- setiastro/saspro/rgb_extract.py +19 -0
- setiastro/saspro/rgbalign.py +723 -0
- setiastro/saspro/runtime_imports.py +7 -0
- setiastro/saspro/runtime_torch.py +754 -0
- setiastro/saspro/save_options.py +73 -0
- setiastro/saspro/selective_color.py +1552 -0
- setiastro/saspro/sfcc.py +1472 -0
- setiastro/saspro/shortcuts.py +3043 -0
- setiastro/saspro/signature_insert.py +1102 -0
- setiastro/saspro/stacking_suite.py +18470 -0
- setiastro/saspro/star_alignment.py +7435 -0
- setiastro/saspro/star_alignment_preset.py +329 -0
- setiastro/saspro/star_metrics.py +49 -0
- setiastro/saspro/star_spikes.py +765 -0
- setiastro/saspro/star_stretch.py +507 -0
- setiastro/saspro/stat_stretch.py +538 -0
- setiastro/saspro/status_log_dock.py +78 -0
- setiastro/saspro/subwindow.py +3328 -0
- setiastro/saspro/supernovaasteroidhunter.py +1719 -0
- setiastro/saspro/swap_manager.py +99 -0
- setiastro/saspro/torch_backend.py +89 -0
- setiastro/saspro/torch_rejection.py +434 -0
- setiastro/saspro/translations/all_source_strings.json +3654 -0
- setiastro/saspro/translations/ar_translations.py +3865 -0
- setiastro/saspro/translations/de_translations.py +3749 -0
- setiastro/saspro/translations/es_translations.py +3939 -0
- setiastro/saspro/translations/fr_translations.py +3858 -0
- setiastro/saspro/translations/hi_translations.py +3571 -0
- setiastro/saspro/translations/integrate_translations.py +270 -0
- setiastro/saspro/translations/it_translations.py +3678 -0
- setiastro/saspro/translations/ja_translations.py +3601 -0
- setiastro/saspro/translations/pt_translations.py +3869 -0
- setiastro/saspro/translations/ru_translations.py +2848 -0
- setiastro/saspro/translations/saspro_ar.qm +0 -0
- setiastro/saspro/translations/saspro_ar.ts +255 -0
- setiastro/saspro/translations/saspro_de.qm +0 -0
- setiastro/saspro/translations/saspro_de.ts +253 -0
- setiastro/saspro/translations/saspro_es.qm +0 -0
- setiastro/saspro/translations/saspro_es.ts +12520 -0
- setiastro/saspro/translations/saspro_fr.qm +0 -0
- setiastro/saspro/translations/saspro_fr.ts +12514 -0
- setiastro/saspro/translations/saspro_hi.qm +0 -0
- setiastro/saspro/translations/saspro_hi.ts +257 -0
- setiastro/saspro/translations/saspro_it.qm +0 -0
- setiastro/saspro/translations/saspro_it.ts +12520 -0
- setiastro/saspro/translations/saspro_ja.qm +0 -0
- setiastro/saspro/translations/saspro_ja.ts +257 -0
- setiastro/saspro/translations/saspro_pt.qm +0 -0
- setiastro/saspro/translations/saspro_pt.ts +257 -0
- setiastro/saspro/translations/saspro_ru.qm +0 -0
- setiastro/saspro/translations/saspro_ru.ts +237 -0
- setiastro/saspro/translations/saspro_sw.qm +0 -0
- setiastro/saspro/translations/saspro_sw.ts +257 -0
- setiastro/saspro/translations/saspro_uk.qm +0 -0
- setiastro/saspro/translations/saspro_uk.ts +10771 -0
- setiastro/saspro/translations/saspro_zh.qm +0 -0
- setiastro/saspro/translations/saspro_zh.ts +12520 -0
- setiastro/saspro/translations/sw_translations.py +3671 -0
- setiastro/saspro/translations/uk_translations.py +3700 -0
- setiastro/saspro/translations/zh_translations.py +3675 -0
- setiastro/saspro/versioning.py +77 -0
- setiastro/saspro/view_bundle.py +1558 -0
- setiastro/saspro/wavescale_hdr.py +645 -0
- setiastro/saspro/wavescale_hdr_preset.py +101 -0
- setiastro/saspro/wavescalede.py +680 -0
- setiastro/saspro/wavescalede_preset.py +230 -0
- setiastro/saspro/wcs_update.py +374 -0
- setiastro/saspro/whitebalance.py +492 -0
- setiastro/saspro/widgets/__init__.py +48 -0
- setiastro/saspro/widgets/common_utilities.py +306 -0
- setiastro/saspro/widgets/graphics_views.py +122 -0
- setiastro/saspro/widgets/image_utils.py +518 -0
- setiastro/saspro/widgets/minigame/game.js +986 -0
- setiastro/saspro/widgets/minigame/index.html +53 -0
- setiastro/saspro/widgets/minigame/style.css +241 -0
- setiastro/saspro/widgets/preview_dialogs.py +280 -0
- setiastro/saspro/widgets/resource_monitor.py +237 -0
- setiastro/saspro/widgets/spinboxes.py +275 -0
- setiastro/saspro/widgets/themed_buttons.py +13 -0
- setiastro/saspro/widgets/wavelet_utils.py +331 -0
- setiastro/saspro/wimi.py +7996 -0
- setiastro/saspro/wims.py +578 -0
- setiastro/saspro/window_shelf.py +185 -0
- setiastro/saspro/xisf.py +1123 -0
- setiastrosuitepro-1.6.2.post1.dist-info/METADATA +278 -0
- setiastrosuitepro-1.6.2.post1.dist-info/RECORD +367 -0
- setiastrosuitepro-1.6.2.post1.dist-info/WHEEL +4 -0
- setiastrosuitepro-1.6.2.post1.dist-info/entry_points.txt +6 -0
- setiastrosuitepro-1.6.2.post1.dist-info/licenses/LICENSE +674 -0
- setiastrosuitepro-1.6.2.post1.dist-info/licenses/license.txt +2580 -0
setiastro/saspro/legacy/numba_utils.py
@@ -0,0 +1,3676 @@
#legacy.numba_utils.py
import numpy as np
from numba import njit, prange
from numba.typed import List
import cv2
import math

@njit(parallel=True, fastmath=True)
def blend_add_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                v = A[y,x,c] + B[y,x,c] * alpha
                # clamp 0..1
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True)
def blend_subtract_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                v = A[y,x,c] - B[y,x,c] * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True)
def blend_multiply_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                v = (A[y,x,c] * (1-alpha)) + (A[y,x,c] * B[y,x,c] * alpha)
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True)
def blend_divide_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    eps = 1e-6
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                # avoid division by zero
                b = A[y,x,c] / (B[y,x,c] + eps)
                # clamp f(A,B)
                if b < 0.0: b = 0.0
                elif b > 1.0: b = 1.0
                # mix with original
                v = A[y,x,c] * (1.0 - alpha) + b * alpha
                # clamp final
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True)
def blend_screen_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                # Screen: 1 - (1-A)*(1-B)
                b = 1.0 - (1.0 - A[y,x,c]) * (1.0 - B[y,x,c])
                if b < 0.0: b = 0.0
                elif b > 1.0: b = 1.0
                v = A[y,x,c] * (1.0 - alpha) + b * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True)
def blend_overlay_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                a = A[y,x,c]
                b_in = B[y,x,c]
                # Overlay: if a < .5: 2*a*b, else: 1 - 2*(1-a)*(1-b)
                if a <= 0.5:
                    b = 2.0 * a * b_in
                else:
                    b = 1.0 - 2.0 * (1.0 - a) * (1.0 - b_in)
                if b < 0.0: b = 0.0
                elif b > 1.0: b = 1.0
                v = a * (1.0 - alpha) + b * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

@njit(parallel=True, fastmath=True)
def blend_difference_numba(A, B, alpha):
    H, W, C = A.shape
    out = np.empty_like(A)
    for y in prange(H):
        for x in range(W):
            for c in range(C):
                # Difference: |A - B|
                b = A[y,x,c] - B[y,x,c]
                if b < 0.0: b = -b
                # clamp f(A,B) is redundant since abs() already ≥0; we cap above 1
                if b > 1.0: b = 1.0
                v = A[y,x,c] * (1.0 - alpha) + b * alpha
                if v < 0.0: v = 0.0
                elif v > 1.0: v = 1.0
                out[y,x,c] = v
    return out

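# --- Editor's note: illustrative usage sketch only, not part of the packaged file. ---
# The blend kernels above expect float arrays scaled to [0, 1] with an explicit
# channel axis (H, W, C); `alpha` controls how strongly B is mixed into A.
# Names and array contents below are hypothetical.
def _example_blend_usage():
    base = np.random.rand(64, 64, 3).astype(np.float32)     # hypothetical base layer
    overlay = np.random.rand(64, 64, 3).astype(np.float32)  # hypothetical blend layer
    added = blend_add_numba(base, overlay, 0.5)       # A + 0.5*B, clamped to [0, 1]
    screened = blend_screen_numba(base, overlay, 1.0) # full-strength screen blend
    return added, screened
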
@njit(parallel=True, fastmath=True)
def rescale_image_numba(image, factor):
    """
    Custom rescale function using bilinear interpolation optimized with numba.
    Supports both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        new_width = int(width * factor)
        new_height = int(height * factor)
        output = np.zeros((new_height, new_width), dtype=np.float32)
        for y in prange(new_height):
            for x in prange(new_width):
                src_x = x / factor
                src_y = y / factor
                x0, y0 = int(src_x), int(src_y)
                x1 = x0 + 1 if x0 + 1 < width else width - 1
                y1 = y0 + 1 if y0 + 1 < height else height - 1
                dx = src_x - x0
                dy = src_y - y0
                output[y, x] = (image[y0, x0] * (1 - dx) * (1 - dy) +
                                image[y0, x1] * dx * (1 - dy) +
                                image[y1, x0] * (1 - dx) * dy +
                                image[y1, x1] * dx * dy)
        return output
    else:
        height, width, channels = image.shape
        new_width = int(width * factor)
        new_height = int(height * factor)
        output = np.zeros((new_height, new_width, channels), dtype=np.float32)
        for y in prange(new_height):
            for x in prange(new_width):
                src_x = x / factor
                src_y = y / factor
                x0, y0 = int(src_x), int(src_y)
                x1 = x0 + 1 if x0 + 1 < width else width - 1
                y1 = y0 + 1 if y0 + 1 < height else height - 1
                dx = src_x - x0
                dy = src_y - y0
                for c in range(channels):
                    output[y, x, c] = (image[y0, x0, c] * (1 - dx) * (1 - dy) +
                                       image[y0, x1, c] * dx * (1 - dy) +
                                       image[y1, x0, c] * (1 - dx) * dy +
                                       image[y1, x1, c] * dx * dy)
        return output

@njit(parallel=True, fastmath=True)
def bin2x2_numba(image):
    """
    Downsample the image by 2×2 via simple averaging (“integer binning”).
    Works on 2D (H×W) or 3D (H×W×C) arrays. If dimensions aren’t even,
    the last row/column is dropped.
    """
    h, w = image.shape[:2]
    h2 = h // 2
    w2 = w // 2

    # allocate output
    if image.ndim == 2:
        out = np.empty((h2, w2), dtype=np.float32)
        for i in prange(h2):
            for j in prange(w2):
                # average 2×2 block
                s = image[2*i , 2*j ] \
                  + image[2*i+1, 2*j ] \
                  + image[2*i , 2*j+1] \
                  + image[2*i+1, 2*j+1]
                out[i, j] = s * 0.25
    else:
        c = image.shape[2]
        out = np.empty((h2, w2, c), dtype=np.float32)
        for i in prange(h2):
            for j in prange(w2):
                for k in range(c):
                    s = image[2*i , 2*j , k] \
                      + image[2*i+1, 2*j , k] \
                      + image[2*i , 2*j+1, k] \
                      + image[2*i+1, 2*j+1, k]
                    out[i, j, k] = s * 0.25

    return out

@njit(parallel=True, fastmath=True)
def flip_horizontal_numba(image):
    """
    Flips an image horizontally using Numba JIT.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = image[y, width - x - 1]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = image[y, width - x - 1, c]
        return output


@njit(parallel=True, fastmath=True)
def flip_vertical_numba(image):
    """
    Flips an image vertically using Numba JIT.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = image[height - y - 1, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = image[height - y - 1, x, c]
        return output


@njit(parallel=True, fastmath=True)
def rotate_90_clockwise_numba(image):
    """
    Rotates the image 90 degrees clockwise.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((width, height), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[x, height - 1 - y] = image[y, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((width, height, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[x, height - 1 - y, c] = image[y, x, c]
        return output


@njit(parallel=True, fastmath=True)
def rotate_90_counterclockwise_numba(image):
    """
    Rotates the image 90 degrees counterclockwise.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((width, height), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[width - 1 - x, y] = image[y, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((width, height, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[width - 1 - x, y, c] = image[y, x, c]
        return output


@njit(parallel=True, fastmath=True)
def invert_image_numba(image):
    """
    Inverts an image (1 - pixel value) using Numba JIT.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = 1.0 - image[y, x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = 1.0 - image[y, x, c]
        return output

@njit(parallel=True, fastmath=True)
def rotate_180_numba(image):
    """
    Rotates the image 180 degrees.
    Works with both mono (2D) and color (3D) images.
    """
    if image.ndim == 2:
        height, width = image.shape
        output = np.empty((height, width), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                output[y, x] = image[height - 1 - y, width - 1 - x]
        return output
    else:
        height, width, channels = image.shape
        output = np.empty((height, width, channels), dtype=image.dtype)
        for y in prange(height):
            for x in prange(width):
                for c in range(channels):
                    output[y, x, c] = image[height - 1 - y, width - 1 - x, c]
        return output

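# --- Editor's note: illustrative usage sketch only, not part of the packaged file. ---
# The geometry helpers above each take one 2D (H, W) or 3D (H, W, C) array and
# return a new array; rescale_image_numba and bin2x2_numba always emit float32.
# Names below are hypothetical.
def _example_geometry_usage():
    mono = np.random.rand(100, 120).astype(np.float32)  # hypothetical mono frame
    upsampled = rescale_image_numba(mono, 2.0)   # bilinear resample, shape (200, 240)
    binned = bin2x2_numba(mono)                  # 2x2 average, shape (50, 60)
    flipped = flip_horizontal_numba(mono)        # mirrored left/right
    rotated = rotate_90_clockwise_numba(mono)    # shape becomes (120, 100)
    return upsampled, binned, flipped, rotated
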
def normalize_flat_cfa_inplace(flat2d: np.ndarray, pattern: str, *, combine_greens: bool = True) -> np.ndarray:
    """
    Normalize a Bayer/mosaic flat so each CFA plane has median 1.0.
    Operates in-place on flat2d and returns it.

    pattern: 'RGGB','BGGR','GRBG','GBRG'
    combine_greens: if True, use one median for both greens (reduces checkerboard risk)
    """
    pat = (pattern or "RGGB").strip().upper()
    if pat not in ("RGGB", "BGGR", "GRBG", "GBRG"):
        pat = "RGGB"

    # map (row_parity, col_parity) -> plane key
    # row0: even rows, row1: odd rows; col0: even cols, col1: odd cols
    if pat == "RGGB":
        m = {(0,0):"R", (0,1):"G1", (1,0):"G2", (1,1):"B"}
    elif pat == "BGGR":
        m = {(0,0):"B", (0,1):"G1", (1,0):"G2", (1,1):"R"}
    elif pat == "GRBG":
        m = {(0,0):"G1", (0,1):"R", (1,0):"B", (1,1):"G2"}
    else:  # "GBRG"
        m = {(0,0):"G1", (0,1):"B", (1,0):"R", (1,1):"G2"}

    # build slice views
    planes = {
        m[(0,0)]: flat2d[0::2, 0::2],
        m[(0,1)]: flat2d[0::2, 1::2],
        m[(1,0)]: flat2d[1::2, 0::2],
        m[(1,1)]: flat2d[1::2, 1::2],
    }

    def safe_median(a: np.ndarray) -> float:
        v = a[np.isfinite(a) & (a > 0)]
        if v.size == 0:
            return 1.0
        d = float(np.median(v))
        return d if np.isfinite(d) and d > 0 else 1.0

    # greens
    if combine_greens and ("G1" in planes) and ("G2" in planes):
        g = np.concatenate([
            planes["G1"][np.isfinite(planes["G1"]) & (planes["G1"] > 0)].ravel(),
            planes["G2"][np.isfinite(planes["G2"]) & (planes["G2"] > 0)].ravel(),
        ])
        denom_g = float(np.median(g)) if g.size else 1.0
        if not np.isfinite(denom_g) or denom_g <= 0:
            denom_g = 1.0
        planes["G1"][:] = planes["G1"] / denom_g
        planes["G2"][:] = planes["G2"] / denom_g
    else:
        for k in ("G1","G2"):
            if k in planes:
                d = safe_median(planes[k])
                planes[k][:] = planes[k] / d

    # R / B
    for k in ("R","B"):
        if k in planes:
            d = safe_median(planes[k])
            planes[k][:] = planes[k] / d

    # final safety
    np.nan_to_num(flat2d, copy=False, nan=1.0, posinf=1.0, neginf=1.0)
    flat2d[flat2d == 0] = 1.0
    return flat2d

@njit(parallel=True, fastmath=True)
def apply_flat_division_numba_2d(image, master_flat, master_bias=None):
    """
    Mono version: image.shape == (H,W)
    """
    if master_bias is not None:
        master_flat = master_flat - master_bias
        image = image - master_bias

    median_flat = np.mean(master_flat)
    height, width = image.shape

    for y in prange(height):
        for x in range(width):
            image[y, x] /= (master_flat[y, x] / median_flat)

    return image


@njit(parallel=True, fastmath=True)
def apply_flat_division_numba_3d(image, master_flat, master_bias=None):
    """
    Color version: image.shape == (H,W,C)
    """
    if master_bias is not None:
        master_flat = master_flat - master_bias
        image = image - master_bias

    median_flat = np.mean(master_flat)
    height, width, channels = image.shape

    for y in prange(height):
        for x in range(width):
            for c in range(channels):
                image[y, x, c] /= (master_flat[y, x, c] / median_flat)

    return image

@njit(parallel=True, fastmath=True)
def _flat_div_2d(img, flat):
    h, w = img.shape
    for y in prange(h):
        for x in range(w):
            f = flat[y, x]
            if (not np.isfinite(f)) or f <= 0.0:
                f = 1.0
            img[y, x] = img[y, x] / f
    return img

@njit(parallel=True, fastmath=True)
def _flat_div_hwc(img, flat):
    h, w, c = img.shape
    flat_is_2d = (flat.ndim == 2)
    for y in prange(h):
        for x in range(w):
            if flat_is_2d:
                f0 = flat[y, x]
                if (not np.isfinite(f0)) or f0 <= 0.0:
                    f0 = 1.0
                for k in range(c):
                    img[y, x, k] = img[y, x, k] / f0
            else:
                for k in range(c):
                    f = flat[y, x, k]
                    if (not np.isfinite(f)) or f <= 0.0:
                        f = 1.0
                    img[y, x, k] = img[y, x, k] / f
    return img

@njit(parallel=True, fastmath=True)
def _flat_div_chw(img, flat):
    c, h, w = img.shape
    flat_is_2d = (flat.ndim == 2)
    for y in prange(h):
        for x in range(w):
            if flat_is_2d:
                f0 = flat[y, x]
                if (not np.isfinite(f0)) or f0 <= 0.0:
                    f0 = 1.0
                for k in range(c):
                    img[k, y, x] = img[k, y, x] / f0
            else:
                for k in range(c):
                    f = flat[k, y, x]
                    if (not np.isfinite(f)) or f <= 0.0:
                        f = 1.0
                    img[k, y, x] = img[k, y, x] / f
    return img

def apply_flat_division_numba(image, master_flat, master_bias=None):
    """
    Supports:
      - 2D mono/bayer: (H,W)
      - Color HWC: (H,W,3)
      - Color CHW: (3,H,W)

    NOTE: master_bias arg kept for API compatibility; do bias/dark subtraction outside.
    """
    if image.ndim == 2:
        return _flat_div_2d(image, master_flat)

    if image.ndim == 3:
        # CHW common in your pipeline
        if image.shape[0] == 3 and image.shape[-1] != 3:
            return _flat_div_chw(image, master_flat)
        # HWC
        if image.shape[-1] == 3:
            return _flat_div_hwc(image, master_flat)

        # fallback: treat as HWC
        return _flat_div_hwc(image, master_flat)

    raise ValueError(f"apply_flat_division_numba: expected 2D or 3D, got shape {image.shape}")

def _bayerpat_to_id(pat: str) -> int:
    pat = (pat or "RGGB").strip().upper()
    if pat == "RGGB": return 0
    if pat == "BGGR": return 1
    if pat == "GRBG": return 2
    if pat == "GBRG": return 3
    return 0

def _bayer_plane_medians(flat2d: np.ndarray, pat: str) -> np.ndarray:
    pat = (pat or "RGGB").strip().upper()
    if pat == "RGGB":
        r = np.median(flat2d[0::2, 0::2])
        g1 = np.median(flat2d[0::2, 1::2])
        g2 = np.median(flat2d[1::2, 0::2])
        b = np.median(flat2d[1::2, 1::2])
    elif pat == "BGGR":
        b = np.median(flat2d[0::2, 0::2])
        g1 = np.median(flat2d[0::2, 1::2])
        g2 = np.median(flat2d[1::2, 0::2])
        r = np.median(flat2d[1::2, 1::2])
    elif pat == "GRBG":
        g1 = np.median(flat2d[0::2, 0::2])
        r = np.median(flat2d[0::2, 1::2])
        b = np.median(flat2d[1::2, 0::2])
        g2 = np.median(flat2d[1::2, 1::2])
    else:  # GBRG
        g1 = np.median(flat2d[0::2, 0::2])
        b = np.median(flat2d[0::2, 1::2])
        r = np.median(flat2d[1::2, 0::2])
        g2 = np.median(flat2d[1::2, 1::2])

    med4 = np.array([r, g1, g2, b], dtype=np.float32)
    med4[~np.isfinite(med4)] = 1.0
    med4[med4 <= 0] = 1.0
    return med4

@njit(parallel=True, fastmath=True)
def apply_flat_division_numba_bayer_2d(image, master_flat, med4, pat_id):
    """
    Bayer-aware mono division. image/master_flat are (H,W).
    med4 is [R,G1,G2,B] for that master_flat, pat_id in {0..3}.
    """
    H, W = image.shape
    for y in prange(H):
        y1 = y & 1
        for x in range(W):
            x1 = x & 1

            # map parity->plane index
            if pat_id == 0:   # RGGB: (0,0)R (0,1)G1 (1,0)G2 (1,1)B
                pi = 0 if (y1==0 and x1==0) else 1 if (y1==0 and x1==1) else 2 if (y1==1 and x1==0) else 3
            elif pat_id == 1: # BGGR
                pi = 3 if (y1==1 and x1==1) else 1 if (y1==0 and x1==1) else 2 if (y1==1 and x1==0) else 0
            elif pat_id == 2: # GRBG
                pi = 1 if (y1==0 and x1==0) else 0 if (y1==0 and x1==1) else 3 if (y1==1 and x1==0) else 2
            else:             # GBRG
                pi = 1 if (y1==0 and x1==0) else 3 if (y1==0 and x1==1) else 0 if (y1==1 and x1==0) else 2

            denom = master_flat[y, x] / med4[pi]
            if denom == 0.0 or not np.isfinite(denom):
                denom = 1.0
            image[y, x] /= denom
    return image

def apply_flat_division_bayer(image2d: np.ndarray, flat2d: np.ndarray, bayerpat: str):
    med4 = _bayer_plane_medians(flat2d, bayerpat)
    pid = _bayerpat_to_id(bayerpat)
    return apply_flat_division_numba_bayer_2d(image2d, flat2d, med4, pid)

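# --- Editor's note: illustrative usage sketch only, not part of the packaged file. ---
# The two public entry points above both modify their input image in place and also
# return it. apply_flat_division_bayer takes per-plane medians from the flat itself,
# so a raw CFA flat can be passed directly; the generic apply_flat_division_numba
# divides pixel-wise by the flat as given, so that flat is typically normalized
# first (e.g. with normalize_flat_cfa_inplace for CFA flats). Names are hypothetical.
def _example_flat_division_usage(light2d, flat2d, bayer_pattern="RGGB"):
    cal_cfa = apply_flat_division_bayer(light2d.astype(np.float32).copy(),
                                        flat2d.astype(np.float32), bayer_pattern)
    flat_norm = normalize_flat_cfa_inplace(flat2d.astype(np.float32).copy(), bayer_pattern)
    cal_generic = apply_flat_division_numba(light2d.astype(np.float32).copy(), flat_norm)
    return cal_cfa, cal_generic
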
@njit(parallel=True)
def subtract_dark_3d(frames, dark_frame):
    """
    For mono stack:
      frames.shape == (F,H,W)
      dark_frame.shape == (H,W)
    Returns the same shape (F,H,W).
    """
    num_frames, height, width = frames.shape
    result = np.empty_like(frames, dtype=np.float32)

    for i in prange(num_frames):
        # Subtract the dark frame from each 2D slice
        result[i] = frames[i] - dark_frame

    return result


@njit(parallel=True)
def subtract_dark_4d(frames, dark_frame):
    """
    For color stack:
      frames.shape == (F,H,W,C)
      dark_frame.shape == (H,W,C)
    Returns the same shape (F,H,W,C).
    """
    num_frames, height, width, channels = frames.shape
    result = np.empty_like(frames, dtype=np.float32)

    for i in prange(num_frames):
        for y in range(height):
            for x in range(width):
                for c in range(channels):
                    result[i, y, x, c] = frames[i, y, x, c] - dark_frame[y, x, c]

    return result

def subtract_dark(frames, dark_frame):
    """
    Dispatcher function that calls the correct Numba function
    depending on whether 'frames' is 3D or 4D.
    """
    if frames.ndim == 3:
        # frames: (F,H,W), dark_frame: (H,W)
        return subtract_dark_3d(frames, dark_frame)
    elif frames.ndim == 4:
        # frames: (F,H,W,C), dark_frame: (H,W,C)
        return subtract_dark_4d(frames, dark_frame)
    else:
        raise ValueError(f"subtract_dark: frames must be 3D or 4D, got {frames.shape}")

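# --- Editor's note: illustrative usage sketch only, not part of the packaged file. ---
# subtract_dark() broadcasts one master dark over every frame of a stack:
# (F, H, W) frames with an (H, W) dark, or (F, H, W, C) frames with an (H, W, C)
# dark, returning a float32 stack of the same shape. Names below are hypothetical.
def _example_dark_subtraction_usage():
    frames = np.random.rand(10, 64, 64).astype(np.float32)   # hypothetical mono stack
    master_dark = np.full((64, 64), 0.01, dtype=np.float32)  # hypothetical master dark
    return subtract_dark(frames, master_dark)  # same (10, 64, 64) shape, dark removed
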
import numpy as np
from numba import njit, prange

# -------------------------------
# Windsorized Sigma Clipping (Weighted, Iterative)
# -------------------------------

@njit(parallel=True, fastmath=True)
def windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
    """
    Iterative Weighted Windsorized Sigma Clipping for a 3D mono stack.
      stack.shape == (F,H,W)
      weights.shape can be (F,) or (F,H,W).
    Returns a tuple:
      (clipped, rejection_mask)
    where:
      clipped is a 2D image (H,W),
      rejection_mask is a boolean array of shape (F,H,W) with True indicating rejection.
    """
    num_frames, height, width = stack.shape
    clipped = np.zeros((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    # Check weights shape
    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 3 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("windsorized_sigma_clip_weighted_3d_iter: mismatch in shapes for 3D stack & weights")

    for i in prange(height):
        for j in range(width):
            pixel_values = stack[:, i, j]  # shape=(F,)
            if weights.ndim == 1:
                pixel_weights = weights[:]  # shape (F,)
            else:
                pixel_weights = weights[:, i, j]
            # Start with nonzero pixels as valid
            valid_mask = pixel_values != 0
            for _ in range(iterations):
                if np.sum(valid_mask) == 0:
                    break
                valid_vals = pixel_values[valid_mask]
                median_val = np.median(valid_vals)
                std_dev = np.std(valid_vals)
                lower_bound = median_val - lower * std_dev
                upper_bound = median_val + upper * std_dev
                valid_mask = valid_mask & (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
            # Record rejections: a pixel is rejected if not valid.
            for f in range(num_frames):
                rej_mask[f, i, j] = not valid_mask[f]
            valid_vals = pixel_values[valid_mask]
            valid_w = pixel_weights[valid_mask]
            wsum = np.sum(valid_w)
            if wsum > 0:
                clipped[i, j] = np.sum(valid_vals * valid_w) / wsum
            else:
                nonzero = pixel_values[pixel_values != 0]
                if nonzero.size > 0:
                    clipped[i, j] = np.median(nonzero)
                else:
                    clipped[i, j] = 0.0
    return clipped, rej_mask


@njit(parallel=True, fastmath=True)
def windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower=2.5, upper=2.5, iterations=2):
    """
    Iterative Weighted Windsorized Sigma Clipping for a 4D color stack.
      stack.shape == (F,H,W,C)
      weights.shape can be (F,) or (F,H,W,C).
    Returns a tuple:
      (clipped, rejection_mask)
    where:
      clipped is a 3D image (H,W,C),
      rejection_mask is a boolean array of shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.zeros((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    # Check weights shape
    if weights.ndim == 1 and weights.shape[0] == num_frames:
        pass
    elif weights.ndim == 4 and weights.shape == stack.shape:
        pass
    else:
        raise ValueError("windsorized_sigma_clip_weighted_4d_iter: mismatch in shapes for 4D stack & weights")

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pixel_values = stack[:, i, j, c]  # shape=(F,)
                if weights.ndim == 1:
                    pixel_weights = weights[:]
                else:
                    pixel_weights = weights[:, i, j, c]
                valid_mask = pixel_values != 0
                for _ in range(iterations):
                    if np.sum(valid_mask) == 0:
                        break
                    valid_vals = pixel_values[valid_mask]
                    median_val = np.median(valid_vals)
                    std_dev = np.std(valid_vals)
                    lower_bound = median_val - lower * std_dev
                    upper_bound = median_val + upper * std_dev
                    valid_mask = valid_mask & (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
                for f in range(num_frames):
                    rej_mask[f, i, j, c] = not valid_mask[f]
                valid_vals = pixel_values[valid_mask]
                valid_w = pixel_weights[valid_mask]
                wsum = np.sum(valid_w)
                if wsum > 0:
                    clipped[i, j, c] = np.sum(valid_vals * valid_w) / wsum
                else:
                    nonzero = pixel_values[pixel_values != 0]
                    if nonzero.size > 0:
                        clipped[i, j, c] = np.median(nonzero)
                    else:
                        clipped[i, j, c] = 0.0
    return clipped, rej_mask


def windsorized_sigma_clip_weighted(stack, weights, lower=2.5, upper=2.5, iterations=2):
    """
    Dispatcher that calls the appropriate iterative Numba function.
    Now returns (clipped, rejection_mask).
    """
    if stack.ndim == 3:
        return windsorized_sigma_clip_weighted_3d_iter(stack, weights, lower, upper, iterations)
    elif stack.ndim == 4:
        return windsorized_sigma_clip_weighted_4d_iter(stack, weights, lower, upper, iterations)
    else:
        raise ValueError(f"windsorized_sigma_clip_weighted: stack must be 3D or 4D, got {stack.shape}")

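# --- Editor's note: illustrative usage sketch only, not part of the packaged file. ---
# The clipping routines above reduce a frame stack to one stacked image plus a
# per-frame boolean rejection mask. Weights may be one scalar per frame (F,) or a
# full per-sample array matching the stack shape. Names below are hypothetical.
def _example_windsorized_stack():
    stack = np.random.rand(12, 32, 32).astype(np.float32)  # hypothetical registered frames
    frame_weights = np.ones(12, dtype=np.float32)           # equal per-frame weights
    stacked, rejected = windsorized_sigma_clip_weighted(stack, frame_weights,
                                                        lower=2.5, upper=2.5, iterations=2)
    # stacked: (32, 32) weighted mean of surviving samples; rejected: (12, 32, 32) bool mask
    return stacked, rejected
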
# -------------------------------
# Kappa-Sigma Clipping (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True)
def kappa_sigma_clip_weighted_3d(stack, weights, kappa=2.5, iterations=3):
    """
    Kappa-Sigma Clipping for a 3D mono stack.
      stack.shape == (F,H,W)
    Returns a tuple: (clipped, rejection_mask)
    where rejection_mask is of shape (F,H,W) indicating per-frame rejections.
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            pixel_values = stack[:, i, j].copy()
            if weights.ndim == 1:
                pixel_weights = weights[:]
            else:
                pixel_weights = weights[:, i, j].copy()

            valid_mask = pixel_values != 0

            med = 0.0
            for _ in range(iterations):
                count = 0
                for k in range(num_frames):
                    if valid_mask[k]:
                        count += 1

                if count == 0:
                    break

                current_vals = pixel_values[valid_mask]

                med = np.median(current_vals)
                std = np.std(current_vals)
                lower_bound = med - kappa * std
                upper_bound = med + kappa * std

                for k in range(num_frames):
                    if valid_mask[k]:
                        val = pixel_values[k]
                        if val < lower_bound or val > upper_bound:
                            valid_mask[k] = False

            for f in range(num_frames):
                rej_mask[f, i, j] = not valid_mask[f]

            wsum = 0.0
            vsum = 0.0
            for k in range(num_frames):
                if valid_mask[k]:
                    w = pixel_weights[k]
                    v = pixel_values[k]
                    wsum += w
                    vsum += v * w

            if wsum > 0:
                clipped[i, j] = vsum / wsum
            else:
                clipped[i, j] = med
    return clipped, rej_mask


@njit(parallel=True, fastmath=True)
def kappa_sigma_clip_weighted_4d(stack, weights, kappa=2.5, iterations=3):
    """
    Kappa-Sigma Clipping for a 4D color stack.
      stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pixel_values = stack[:, i, j, c].copy()
                if weights.ndim == 1:
                    pixel_weights = weights[:]
                else:
                    pixel_weights = weights[:, i, j, c].copy()

                valid_mask = pixel_values != 0

                med = 0.0
                for _ in range(iterations):
                    count = 0
                    for k in range(num_frames):
                        if valid_mask[k]:
                            count += 1

                    if count == 0:
                        break

                    current_vals = pixel_values[valid_mask]

                    med = np.median(current_vals)
                    std = np.std(current_vals)
                    lower_bound = med - kappa * std
                    upper_bound = med + kappa * std

                    for k in range(num_frames):
                        if valid_mask[k]:
                            val = pixel_values[k]
                            if val < lower_bound or val > upper_bound:
                                valid_mask[k] = False

                for f in range(num_frames):
                    rej_mask[f, i, j, c] = not valid_mask[f]

                wsum = 0.0
                vsum = 0.0
                for k in range(num_frames):
                    if valid_mask[k]:
                        w = pixel_weights[k]
                        v = pixel_values[k]
                        wsum += w
                        vsum += v * w

                if wsum > 0:
                    clipped[i, j, c] = vsum / wsum
                else:
                    clipped[i, j, c] = med
    return clipped, rej_mask


def kappa_sigma_clip_weighted(stack, weights, kappa=2.5, iterations=3):
    """
    Dispatcher that returns (clipped, rejection_mask) for kappa-sigma clipping.
    """
    if stack.ndim == 3:
        return kappa_sigma_clip_weighted_3d(stack, weights, kappa, iterations)
    elif stack.ndim == 4:
        return kappa_sigma_clip_weighted_4d(stack, weights, kappa, iterations)
    else:
        raise ValueError(f"kappa_sigma_clip_weighted: stack must be 3D or 4D, got {stack.shape}")


# -------------------------------
# Trimmed Mean (Weighted)
# -------------------------------

@njit(parallel=True, fastmath=True)
def trimmed_mean_weighted_3d(stack, weights, trim_fraction=0.1):
    """
    Trimmed Mean for a 3D mono stack.
      stack.shape == (F,H,W)
    Returns (clipped, rejection_mask) where rejection_mask (F,H,W) flags frames that were trimmed.
    """
    num_frames, height, width = stack.shape
    clipped = np.empty((height, width), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            pix_all = stack[:, i, j]
            if weights.ndim == 1:
                w_all = weights[:]
            else:
                w_all = weights[:, i, j]
            # Exclude zeros and record original indices.
            valid = pix_all != 0
            pix = pix_all[valid]
            w = w_all[valid]
            orig_idx = np.empty(pix_all.shape[0], dtype=np.int64)
            count = 0
            for f in range(num_frames):
                if valid[f]:
                    orig_idx[count] = f
                    count += 1
            n = pix.size
            if n == 0:
                clipped[i, j] = 0.0
                # Mark all as rejected.
                for f in range(num_frames):
                    if not valid[f]:
                        rej_mask[f, i, j] = True
                continue
            trim = int(trim_fraction * n)
            order = np.argsort(pix)
            # Determine which indices (in the valid list) are kept.
            if n > 2 * trim:
                keep_order = order[trim:n - trim]
            else:
                keep_order = order
            # Build a mask for the valid pixels (length n) that are kept.
            keep_mask = np.zeros(n, dtype=np.bool_)
            for k in range(keep_order.size):
                keep_mask[keep_order[k]] = True
            # Map back to original frame indices.
            for idx in range(n):
                frame = orig_idx[idx]
                if not keep_mask[idx]:
                    rej_mask[frame, i, j] = True
                else:
                    rej_mask[frame, i, j] = False
            # Compute weighted average of kept values.
            sorted_pix = pix[order]
            sorted_w = w[order]
            if n > 2 * trim:
                trimmed_values = sorted_pix[trim:n - trim]
                trimmed_weights = sorted_w[trim:n - trim]
            else:
                trimmed_values = sorted_pix
                trimmed_weights = sorted_w
            wsum = trimmed_weights.sum()
            if wsum > 0:
                clipped[i, j] = np.sum(trimmed_values * trimmed_weights) / wsum
            else:
                clipped[i, j] = np.median(trimmed_values)
    return clipped, rej_mask


@njit(parallel=True, fastmath=True)
def trimmed_mean_weighted_4d(stack, weights, trim_fraction=0.1):
    """
    Trimmed Mean for a 4D color stack.
      stack.shape == (F,H,W,C)
    Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
    """
    num_frames, height, width, channels = stack.shape
    clipped = np.empty((height, width, channels), dtype=np.float32)
    rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)

    for i in prange(height):
        for j in range(width):
            for c in range(channels):
                pix_all = stack[:, i, j, c]
                if weights.ndim == 1:
                    w_all = weights[:]
                else:
                    w_all = weights[:, i, j, c]
                valid = pix_all != 0
                pix = pix_all[valid]
                w = w_all[valid]
                orig_idx = np.empty(pix_all.shape[0], dtype=np.int64)
                count = 0
                for f in range(num_frames):
                    if valid[f]:
                        orig_idx[count] = f
                        count += 1
                n = pix.size
                if n == 0:
                    clipped[i, j, c] = 0.0
                    for f in range(num_frames):
                        if not valid[f]:
                            rej_mask[f, i, j, c] = True
                    continue
                trim = int(trim_fraction * n)
                order = np.argsort(pix)
                if n > 2 * trim:
                    keep_order = order[trim:n - trim]
                else:
                    keep_order = order
                keep_mask = np.zeros(n, dtype=np.bool_)
                for k in range(keep_order.size):
                    keep_mask[keep_order[k]] = True
                for idx in range(n):
                    frame = orig_idx[idx]
                    if not keep_mask[idx]:
                        rej_mask[frame, i, j, c] = True
                    else:
                        rej_mask[frame, i, j, c] = False
                sorted_pix = pix[order]
                sorted_w = w[order]
                if n > 2 * trim:
                    trimmed_values = sorted_pix[trim:n - trim]
                    trimmed_weights = sorted_w[trim:n - trim]
                else:
                    trimmed_values = sorted_pix
                    trimmed_weights = sorted_w
                wsum = trimmed_weights.sum()
                if wsum > 0:
                    clipped[i, j, c] = np.sum(trimmed_values * trimmed_weights) / wsum
                else:
                    clipped[i, j, c] = np.median(trimmed_values)
    return clipped, rej_mask


def trimmed_mean_weighted(stack, weights, trim_fraction=0.1):
    """
    Dispatcher that returns (clipped, rejection_mask) for trimmed mean.
    """
    if stack.ndim == 3:
        return trimmed_mean_weighted_3d(stack, weights, trim_fraction)
    elif stack.ndim == 4:
        return trimmed_mean_weighted_4d(stack, weights, trim_fraction)
    else:
        raise ValueError(f"trimmed_mean_weighted: stack must be 3D or 4D, got {stack.shape}")

1079
|
+
# -------------------------------
|
|
1080
|
+
# Extreme Studentized Deviate (ESD) Clipping (Weighted)
|
|
1081
|
+
# -------------------------------
|
|
1082
|
+
|
|
1083
|
+
@njit(parallel=True, fastmath=True)
|
|
1084
|
+
def esd_clip_weighted_3d(stack, weights, threshold=3.0):
|
|
1085
|
+
"""
|
|
1086
|
+
ESD Clipping for a 3D mono stack.
|
|
1087
|
+
stack.shape == (F,H,W)
|
|
1088
|
+
Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W).
|
|
1089
|
+
"""
|
|
1090
|
+
num_frames, height, width = stack.shape
|
|
1091
|
+
clipped = np.empty((height, width), dtype=np.float32)
|
|
1092
|
+
rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
|
|
1093
|
+
|
|
1094
|
+
if weights.ndim == 1 and weights.shape[0] == num_frames:
|
|
1095
|
+
pass
|
|
1096
|
+
elif weights.ndim == 3 and weights.shape == stack.shape:
|
|
1097
|
+
pass
|
|
1098
|
+
else:
|
|
1099
|
+
raise ValueError("esd_clip_weighted_3d: mismatch in shapes for 3D stack & weights")
|
|
1100
|
+
|
|
1101
|
+
for i in prange(height):
|
|
1102
|
+
for j in range(width):
|
|
1103
|
+
pix = stack[:, i, j]
|
|
1104
|
+
if weights.ndim == 1:
|
|
1105
|
+
w = weights[:]
|
|
1106
|
+
else:
|
|
1107
|
+
w = weights[:, i, j]
|
|
1108
|
+
valid = pix != 0
|
|
1109
|
+
values = pix[valid]
|
|
1110
|
+
wvals = w[valid]
|
|
1111
|
+
if values.size == 0:
|
|
1112
|
+
clipped[i, j] = 0.0
|
|
1113
|
+
for f in range(num_frames):
|
|
1114
|
+
if not valid[f]:
|
|
1115
|
+
rej_mask[f, i, j] = True
|
|
1116
|
+
continue
|
|
1117
|
+
mean_val = np.mean(values)
|
|
1118
|
+
std_val = np.std(values)
|
|
1119
|
+
if std_val == 0:
|
|
1120
|
+
clipped[i, j] = mean_val
|
|
1121
|
+
for f in range(num_frames):
|
|
1122
|
+
rej_mask[f, i, j] = False
|
|
1123
|
+
continue
|
|
1124
|
+
z_scores = np.abs((values - mean_val) / std_val)
|
|
1125
|
+
valid2 = z_scores < threshold
|
|
1126
|
+
# Mark rejected: for the valid entries, use valid2.
|
|
1127
|
+
idx = 0
|
|
1128
|
+
for f in range(num_frames):
|
|
1129
|
+
if valid[f]:
|
|
1130
|
+
if not valid2[idx]:
|
|
1131
|
+
rej_mask[f, i, j] = True
|
|
1132
|
+
else:
|
|
1133
|
+
rej_mask[f, i, j] = False
|
|
1134
|
+
idx += 1
|
|
1135
|
+
else:
|
|
1136
|
+
rej_mask[f, i, j] = True
|
|
1137
|
+
values = values[valid2]
|
|
1138
|
+
wvals = wvals[valid2]
|
|
1139
|
+
wsum = wvals.sum()
|
|
1140
|
+
if wsum > 0:
|
|
1141
|
+
clipped[i, j] = np.sum(values * wvals) / wsum
|
|
1142
|
+
else:
|
|
1143
|
+
clipped[i, j] = mean_val
|
|
1144
|
+
return clipped, rej_mask
|
|
1145
|
+
|
|
1146
|
+
|
|
1147
|
+
@njit(parallel=True, fastmath=True)
|
|
1148
|
+
def esd_clip_weighted_4d(stack, weights, threshold=3.0):
|
|
1149
|
+
"""
|
|
1150
|
+
ESD Clipping for a 4D color stack.
|
|
1151
|
+
stack.shape == (F,H,W,C)
|
|
1152
|
+
Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
|
|
1153
|
+
"""
|
|
1154
|
+
num_frames, height, width, channels = stack.shape
|
|
1155
|
+
clipped = np.empty((height, width, channels), dtype=np.float32)
|
|
1156
|
+
rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
|
|
1157
|
+
|
|
1158
|
+
if weights.ndim == 1 and weights.shape[0] == num_frames:
|
|
1159
|
+
pass
|
|
1160
|
+
elif weights.ndim == 4 and weights.shape == stack.shape:
|
|
1161
|
+
pass
|
|
1162
|
+
else:
|
|
1163
|
+
raise ValueError("esd_clip_weighted_4d: mismatch in shapes for 4D stack & weights")
|
|
1164
|
+
|
|
1165
|
+
for i in prange(height):
|
|
1166
|
+
for j in range(width):
|
|
1167
|
+
for c in range(channels):
|
|
1168
|
+
pix = stack[:, i, j, c]
|
|
1169
|
+
if weights.ndim == 1:
|
|
1170
|
+
w = weights[:]
|
|
1171
|
+
else:
|
|
1172
|
+
w = weights[:, i, j, c]
|
|
1173
|
+
valid = pix != 0
|
|
1174
|
+
values = pix[valid]
|
|
1175
|
+
wvals = w[valid]
|
|
1176
|
+
if values.size == 0:
|
|
1177
|
+
clipped[i, j, c] = 0.0
|
|
1178
|
+
for f in range(num_frames):
|
|
1179
|
+
if not valid[f]:
|
|
1180
|
+
rej_mask[f, i, j, c] = True
|
|
1181
|
+
continue
|
|
1182
|
+
mean_val = np.mean(values)
|
|
1183
|
+
std_val = np.std(values)
|
|
1184
|
+
if std_val == 0:
|
|
1185
|
+
clipped[i, j, c] = mean_val
|
|
1186
|
+
for f in range(num_frames):
|
|
1187
|
+
rej_mask[f, i, j, c] = False
|
|
1188
|
+
continue
|
|
1189
|
+
z_scores = np.abs((values - mean_val) / std_val)
|
|
1190
|
+
valid2 = z_scores < threshold
|
|
1191
|
+
idx = 0
|
|
1192
|
+
for f in range(num_frames):
|
|
1193
|
+
if valid[f]:
|
|
1194
|
+
if not valid2[idx]:
|
|
1195
|
+
rej_mask[f, i, j, c] = True
|
|
1196
|
+
else:
|
|
1197
|
+
rej_mask[f, i, j, c] = False
|
|
1198
|
+
idx += 1
|
|
1199
|
+
else:
|
|
1200
|
+
rej_mask[f, i, j, c] = True
|
|
1201
|
+
values = values[valid2]
|
|
1202
|
+
wvals = wvals[valid2]
|
|
1203
|
+
wsum = wvals.sum()
|
|
1204
|
+
if wsum > 0:
|
|
1205
|
+
clipped[i, j, c] = np.sum(values * wvals) / wsum
|
|
1206
|
+
else:
|
|
1207
|
+
clipped[i, j, c] = mean_val
|
|
1208
|
+
return clipped, rej_mask
|
|
1209
|
+
|
|
1210
|
+
|
|
1211
|
+
def esd_clip_weighted(stack, weights, threshold=3.0):
|
|
1212
|
+
"""
|
|
1213
|
+
Dispatcher that returns (clipped, rejection_mask) for ESD clipping.
|
|
1214
|
+
"""
|
|
1215
|
+
if stack.ndim == 3:
|
|
1216
|
+
return esd_clip_weighted_3d(stack, weights, threshold)
|
|
1217
|
+
elif stack.ndim == 4:
|
|
1218
|
+
return esd_clip_weighted_4d(stack, weights, threshold)
|
|
1219
|
+
else:
|
|
1220
|
+
raise ValueError(f"esd_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
|
|
1221
|
+
|
|
1222
|
+
|
|
1223
|
+
# -------------------------------
|
|
1224
|
+
# Biweight Location (Weighted)
|
|
1225
|
+
# -------------------------------
|
|
1226
|
+
|
|
1227
|
+
@njit(parallel=True, fastmath=True)
|
|
1228
|
+
def biweight_location_weighted_3d(stack, weights, tuning_constant=6.0):
|
|
1229
|
+
"""
|
|
1230
|
+
Biweight Location for a 3D mono stack.
|
|
1231
|
+
stack.shape == (F,H,W)
|
|
1232
|
+
Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W).
|
|
1233
|
+
"""
|
|
1234
|
+
num_frames, height, width = stack.shape
|
|
1235
|
+
clipped = np.empty((height, width), dtype=np.float32)
|
|
1236
|
+
rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
|
|
1237
|
+
|
|
1238
|
+
if weights.ndim == 1 and weights.shape[0] == num_frames:
|
|
1239
|
+
pass
|
|
1240
|
+
elif weights.ndim == 3 and weights.shape == stack.shape:
|
|
1241
|
+
pass
|
|
1242
|
+
else:
|
|
1243
|
+
raise ValueError("biweight_location_weighted_3d: mismatch in shapes for 3D stack & weights")
|
|
1244
|
+
|
|
1245
|
+
for i in prange(height):
|
|
1246
|
+
for j in range(width):
|
|
1247
|
+
x = stack[:, i, j]
|
|
1248
|
+
if weights.ndim == 1:
|
|
1249
|
+
w = weights[:]
|
|
1250
|
+
else:
|
|
1251
|
+
w = weights[:, i, j]
|
|
1252
|
+
valid = x != 0
|
|
1253
|
+
x_valid = x[valid]
|
|
1254
|
+
w_valid = w[valid]
|
|
1255
|
+
# Record rejections for zeros:
|
|
1256
|
+
for f in range(num_frames):
|
|
1257
|
+
if not valid[f]:
|
|
1258
|
+
rej_mask[f, i, j] = True
|
|
1259
|
+
else:
|
|
1260
|
+
rej_mask[f, i, j] = False # initialize as accepted; may update below
|
|
1261
|
+
n = x_valid.size
|
|
1262
|
+
if n == 0:
|
|
1263
|
+
clipped[i, j] = 0.0
|
|
1264
|
+
continue
|
|
1265
|
+
M = np.median(x_valid)
|
|
1266
|
+
mad = np.median(np.abs(x_valid - M))
|
|
1267
|
+
if mad == 0:
|
|
1268
|
+
clipped[i, j] = M
|
|
1269
|
+
continue
|
|
1270
|
+
u = (x_valid - M) / (tuning_constant * mad)
|
|
1271
|
+
mask = np.abs(u) < 1
|
|
1272
|
+
# Mark frames that were excluded by the biweight rejection:
|
|
1273
|
+
idx = 0
|
|
1274
|
+
for f in range(num_frames):
|
|
1275
|
+
if valid[f]:
|
|
1276
|
+
if not mask[idx]:
|
|
1277
|
+
rej_mask[f, i, j] = True
|
|
1278
|
+
idx += 1
|
|
1279
|
+
x_masked = x_valid[mask]
|
|
1280
|
+
w_masked = w_valid[mask]
|
|
1281
|
+
numerator = ((x_masked - M) * (1 - u[mask]**2)**2 * w_masked).sum()
|
|
1282
|
+
denominator = ((1 - u[mask]**2)**2 * w_masked).sum()
|
|
1283
|
+
if denominator != 0:
|
|
1284
|
+
biweight = M + numerator / denominator
|
|
1285
|
+
else:
|
|
1286
|
+
biweight = M
|
|
1287
|
+
clipped[i, j] = biweight
|
|
1288
|
+
return clipped, rej_mask
|
|
1289
|
+
|
|
1290
|
+
|
|
1291
|
+
@njit(parallel=True, fastmath=True)
|
|
1292
|
+
def biweight_location_weighted_4d(stack, weights, tuning_constant=6.0):
|
|
1293
|
+
"""
|
|
1294
|
+
Biweight Location for a 4D color stack.
|
|
1295
|
+
stack.shape == (F,H,W,C)
|
|
1296
|
+
Returns (clipped, rejection_mask) where rejection_mask has shape (F,H,W,C).
|
|
1297
|
+
"""
|
|
1298
|
+
num_frames, height, width, channels = stack.shape
|
|
1299
|
+
clipped = np.empty((height, width, channels), dtype=np.float32)
|
|
1300
|
+
rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
|
|
1301
|
+
|
|
1302
|
+
if weights.ndim == 1 and weights.shape[0] == num_frames:
|
|
1303
|
+
pass
|
|
1304
|
+
elif weights.ndim == 4 and weights.shape == stack.shape:
|
|
1305
|
+
pass
|
|
1306
|
+
else:
|
|
1307
|
+
raise ValueError("biweight_location_weighted_4d: mismatch in shapes for 4D stack & weights")
|
|
1308
|
+
|
|
1309
|
+
for i in prange(height):
|
|
1310
|
+
for j in range(width):
|
|
1311
|
+
for c in range(channels):
|
|
1312
|
+
x = stack[:, i, j, c]
|
|
1313
|
+
if weights.ndim == 1:
|
|
1314
|
+
w = weights[:]
|
|
1315
|
+
else:
|
|
1316
|
+
w = weights[:, i, j, c]
|
|
1317
|
+
valid = x != 0
|
|
1318
|
+
x_valid = x[valid]
|
|
1319
|
+
w_valid = w[valid]
|
|
1320
|
+
for f in range(num_frames):
|
|
1321
|
+
if not valid[f]:
|
|
1322
|
+
rej_mask[f, i, j, c] = True
|
|
1323
|
+
else:
|
|
1324
|
+
rej_mask[f, i, j, c] = False
|
|
1325
|
+
n = x_valid.size
|
|
1326
|
+
if n == 0:
|
|
1327
|
+
clipped[i, j, c] = 0.0
|
|
1328
|
+
continue
|
|
1329
|
+
M = np.median(x_valid)
|
|
1330
|
+
mad = np.median(np.abs(x_valid - M))
|
|
1331
|
+
if mad == 0:
|
|
1332
|
+
clipped[i, j, c] = M
|
|
1333
|
+
continue
|
|
1334
|
+
u = (x_valid - M) / (tuning_constant * mad)
|
|
1335
|
+
mask = np.abs(u) < 1
|
|
1336
|
+
idx = 0
|
|
1337
|
+
for f in range(num_frames):
|
|
1338
|
+
if valid[f]:
|
|
1339
|
+
if not mask[idx]:
|
|
1340
|
+
rej_mask[f, i, j, c] = True
|
|
1341
|
+
idx += 1
|
|
1342
|
+
x_masked = x_valid[mask]
|
|
1343
|
+
w_masked = w_valid[mask]
|
|
1344
|
+
numerator = ((x_masked - M) * (1 - u[mask]**2)**2 * w_masked).sum()
|
|
1345
|
+
denominator = ((1 - u[mask]**2)**2 * w_masked).sum()
|
|
1346
|
+
if denominator != 0:
|
|
1347
|
+
biweight = M + numerator / denominator
|
|
1348
|
+
else:
|
|
1349
|
+
biweight = M
|
|
1350
|
+
clipped[i, j, c] = biweight
|
|
1351
|
+
return clipped, rej_mask
|
|
1352
|
+
|
|
1353
|
+
|
|
1354
|
+
def biweight_location_weighted(stack, weights, tuning_constant=6.0):
|
|
1355
|
+
"""
|
|
1356
|
+
Dispatcher that returns (clipped, rejection_mask) for biweight location.
|
|
1357
|
+
"""
|
|
1358
|
+
if stack.ndim == 3:
|
|
1359
|
+
return biweight_location_weighted_3d(stack, weights, tuning_constant)
|
|
1360
|
+
elif stack.ndim == 4:
|
|
1361
|
+
return biweight_location_weighted_4d(stack, weights, tuning_constant)
|
|
1362
|
+
else:
|
|
1363
|
+
raise ValueError(f"biweight_location_weighted: stack must be 3D or 4D, got {stack.shape}")
|
|
1364
|
+
|
|
1365
|
+
|
|
1366
|
+
# -------------------------------
|
|
1367
|
+
# Modified Z-Score Clipping (Weighted)
|
|
1368
|
+
# -------------------------------
|
|
1369
|
+
|
|
1370
|
+
@njit(parallel=True, fastmath=True)
|
|
1371
|
+
def modified_zscore_clip_weighted_3d(stack, weights, threshold=3.5):
|
|
1372
|
+
"""
|
|
1373
|
+
Modified Z-Score Clipping for a 3D mono stack.
|
|
1374
|
+
stack.shape == (F,H,W)
|
|
1375
|
+
Returns (clipped, rejection_mask) with rejection_mask shape (F,H,W).
|
|
1376
|
+
"""
|
|
1377
|
+
num_frames, height, width = stack.shape
|
|
1378
|
+
clipped = np.empty((height, width), dtype=np.float32)
|
|
1379
|
+
rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
|
|
1380
|
+
|
|
1381
|
+
if weights.ndim == 1 and weights.shape[0] == num_frames:
|
|
1382
|
+
pass
|
|
1383
|
+
elif weights.ndim == 3 and weights.shape == stack.shape:
|
|
1384
|
+
pass
|
|
1385
|
+
else:
|
|
1386
|
+
raise ValueError("modified_zscore_clip_weighted_3d: mismatch in shapes for 3D stack & weights")
|
|
1387
|
+
|
|
1388
|
+
for i in prange(height):
|
|
1389
|
+
for j in range(width):
|
|
1390
|
+
x = stack[:, i, j]
|
|
1391
|
+
if weights.ndim == 1:
|
|
1392
|
+
w = weights[:]
|
|
1393
|
+
else:
|
|
1394
|
+
w = weights[:, i, j]
|
|
1395
|
+
valid = x != 0
|
|
1396
|
+
x_valid = x[valid]
|
|
1397
|
+
w_valid = w[valid]
|
|
1398
|
+
if x_valid.size == 0:
|
|
1399
|
+
clipped[i, j] = 0.0
|
|
1400
|
+
for f in range(num_frames):
|
|
1401
|
+
if not valid[f]:
|
|
1402
|
+
rej_mask[f, i, j] = True
|
|
1403
|
+
continue
|
|
1404
|
+
median_val = np.median(x_valid)
|
|
1405
|
+
mad = np.median(np.abs(x_valid - median_val))
|
|
1406
|
+
if mad == 0:
|
|
1407
|
+
clipped[i, j] = median_val
|
|
1408
|
+
for f in range(num_frames):
|
|
1409
|
+
rej_mask[f, i, j] = False
|
|
1410
|
+
continue
|
|
1411
|
+
modified_z = 0.6745 * (x_valid - median_val) / mad
|
|
1412
|
+
valid2 = np.abs(modified_z) < threshold
|
|
1413
|
+
idx = 0
|
|
1414
|
+
for f in range(num_frames):
|
|
1415
|
+
if valid[f]:
|
|
1416
|
+
if not valid2[idx]:
|
|
1417
|
+
rej_mask[f, i, j] = True
|
|
1418
|
+
else:
|
|
1419
|
+
rej_mask[f, i, j] = False
|
|
1420
|
+
idx += 1
|
|
1421
|
+
else:
|
|
1422
|
+
rej_mask[f, i, j] = True
|
|
1423
|
+
x_final = x_valid[valid2]
|
|
1424
|
+
w_final = w_valid[valid2]
|
|
1425
|
+
wsum = w_final.sum()
|
|
1426
|
+
if wsum > 0:
|
|
1427
|
+
clipped[i, j] = np.sum(x_final * w_final) / wsum
|
|
1428
|
+
else:
|
|
1429
|
+
clipped[i, j] = median_val
|
|
1430
|
+
return clipped, rej_mask
|
|
1431
|
+
|
|
1432
|
+
|
|
1433
|
+
@njit(parallel=True, fastmath=True)
|
|
1434
|
+
def modified_zscore_clip_weighted_4d(stack, weights, threshold=3.5):
|
|
1435
|
+
"""
|
|
1436
|
+
Modified Z-Score Clipping for a 4D color stack.
|
|
1437
|
+
stack.shape == (F,H,W,C)
|
|
1438
|
+
Returns (clipped, rejection_mask) with rejection_mask shape (F,H,W,C).
|
|
1439
|
+
"""
|
|
1440
|
+
num_frames, height, width, channels = stack.shape
|
|
1441
|
+
clipped = np.empty((height, width, channels), dtype=np.float32)
|
|
1442
|
+
rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
|
|
1443
|
+
|
|
1444
|
+
if weights.ndim == 1 and weights.shape[0] == num_frames:
|
|
1445
|
+
pass
|
|
1446
|
+
elif weights.ndim == 4 and weights.shape == stack.shape:
|
|
1447
|
+
pass
|
|
1448
|
+
else:
|
|
1449
|
+
raise ValueError("modified_zscore_clip_weighted_4d: mismatch in shapes for 4D stack & weights")
|
|
1450
|
+
|
|
1451
|
+
for i in prange(height):
|
|
1452
|
+
for j in range(width):
|
|
1453
|
+
for c in range(channels):
|
|
1454
|
+
x = stack[:, i, j, c]
|
|
1455
|
+
if weights.ndim == 1:
|
|
1456
|
+
w = weights[:]
|
|
1457
|
+
else:
|
|
1458
|
+
w = weights[:, i, j, c]
|
|
1459
|
+
valid = x != 0
|
|
1460
|
+
x_valid = x[valid]
|
|
1461
|
+
w_valid = w[valid]
|
|
1462
|
+
if x_valid.size == 0:
|
|
1463
|
+
clipped[i, j, c] = 0.0
|
|
1464
|
+
for f in range(num_frames):
|
|
1465
|
+
if not valid[f]:
|
|
1466
|
+
rej_mask[f, i, j, c] = True
|
|
1467
|
+
continue
|
|
1468
|
+
median_val = np.median(x_valid)
|
|
1469
|
+
mad = np.median(np.abs(x_valid - median_val))
|
|
1470
|
+
if mad == 0:
|
|
1471
|
+
clipped[i, j, c] = median_val
|
|
1472
|
+
for f in range(num_frames):
|
|
1473
|
+
rej_mask[f, i, j, c] = False
|
|
1474
|
+
continue
|
|
1475
|
+
modified_z = 0.6745 * (x_valid - median_val) / mad
|
|
1476
|
+
valid2 = np.abs(modified_z) < threshold
|
|
1477
|
+
idx = 0
|
|
1478
|
+
for f in range(num_frames):
|
|
1479
|
+
if valid[f]:
|
|
1480
|
+
if not valid2[idx]:
|
|
1481
|
+
rej_mask[f, i, j, c] = True
|
|
1482
|
+
else:
|
|
1483
|
+
rej_mask[f, i, j, c] = False
|
|
1484
|
+
idx += 1
|
|
1485
|
+
else:
|
|
1486
|
+
rej_mask[f, i, j, c] = True
|
|
1487
|
+
x_final = x_valid[valid2]
|
|
1488
|
+
w_final = w_valid[valid2]
|
|
1489
|
+
wsum = w_final.sum()
|
|
1490
|
+
if wsum > 0:
|
|
1491
|
+
clipped[i, j, c] = np.sum(x_final * w_final) / wsum
|
|
1492
|
+
else:
|
|
1493
|
+
clipped[i, j, c] = median_val
|
|
1494
|
+
return clipped, rej_mask
|
|
1495
|
+
|
|
1496
|
+
|
|
1497
|
+
def modified_zscore_clip_weighted(stack, weights, threshold=3.5):
|
|
1498
|
+
"""
|
|
1499
|
+
Dispatcher that returns (clipped, rejection_mask) for modified z-score clipping.
|
|
1500
|
+
"""
|
|
1501
|
+
if stack.ndim == 3:
|
|
1502
|
+
return modified_zscore_clip_weighted_3d(stack, weights, threshold)
|
|
1503
|
+
elif stack.ndim == 4:
|
|
1504
|
+
return modified_zscore_clip_weighted_4d(stack, weights, threshold)
|
|
1505
|
+
else:
|
|
1506
|
+
raise ValueError(f"modified_zscore_clip_weighted: stack must be 3D or 4D, got {stack.shape}")
|
|
1507
|
+
|
|
1508
|
+
|
|
1509
|
+
# -------------------------------
|
|
1510
|
+
# Windsorized Sigma Clipping (Non-weighted)
|
|
1511
|
+
# -------------------------------
|
|
1512
|
+
|
|
1513
|
+
@njit(parallel=True, fastmath=True)
|
|
1514
|
+
def windsorized_sigma_clip_3d(stack, lower=2.5, upper=2.5):
|
|
1515
|
+
"""
|
|
1516
|
+
Windsorized Sigma Clipping for a 3D mono stack (non-weighted).
|
|
1517
|
+
stack.shape == (F,H,W)
|
|
1518
|
+
Returns (clipped, rejection_mask) where rejection_mask is (F,H,W).
|
|
1519
|
+
"""
|
|
1520
|
+
num_frames, height, width = stack.shape
|
|
1521
|
+
clipped = np.zeros((height, width), dtype=np.float32)
|
|
1522
|
+
rej_mask = np.zeros((num_frames, height, width), dtype=np.bool_)
|
|
1523
|
+
|
|
1524
|
+
for i in prange(height):
|
|
1525
|
+
for j in range(width):
|
|
1526
|
+
pixel_values = stack[:, i, j]
|
|
1527
|
+
median_val = np.median(pixel_values)
|
|
1528
|
+
std_dev = np.std(pixel_values)
|
|
1529
|
+
lower_bound = median_val - lower * std_dev
|
|
1530
|
+
upper_bound = median_val + upper * std_dev
|
|
1531
|
+
valid = (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
|
|
1532
|
+
for f in range(num_frames):
|
|
1533
|
+
rej_mask[f, i, j] = not valid[f]
|
|
1534
|
+
valid_vals = pixel_values[valid]
|
|
1535
|
+
if valid_vals.size > 0:
|
|
1536
|
+
clipped[i, j] = np.mean(valid_vals)
|
|
1537
|
+
else:
|
|
1538
|
+
clipped[i, j] = median_val
|
|
1539
|
+
return clipped, rej_mask
|
|
1540
|
+
|
|
1541
|
+
|
|
1542
|
+
@njit(parallel=True, fastmath=True)
|
|
1543
|
+
def windsorized_sigma_clip_4d(stack, lower=2.5, upper=2.5):
|
|
1544
|
+
"""
|
|
1545
|
+
Windsorized Sigma Clipping for a 4D color stack (non-weighted).
|
|
1546
|
+
stack.shape == (F,H,W,C)
|
|
1547
|
+
Returns (clipped, rejection_mask) where rejection_mask is (F,H,W,C).
|
|
1548
|
+
"""
|
|
1549
|
+
num_frames, height, width, channels = stack.shape
|
|
1550
|
+
clipped = np.zeros((height, width, channels), dtype=np.float32)
|
|
1551
|
+
rej_mask = np.zeros((num_frames, height, width, channels), dtype=np.bool_)
|
|
1552
|
+
|
|
1553
|
+
for i in prange(height):
|
|
1554
|
+
for j in range(width):
|
|
1555
|
+
for c in range(channels):
|
|
1556
|
+
pixel_values = stack[:, i, j, c]
|
|
1557
|
+
median_val = np.median(pixel_values)
|
|
1558
|
+
std_dev = np.std(pixel_values)
|
|
1559
|
+
lower_bound = median_val - lower * std_dev
|
|
1560
|
+
upper_bound = median_val + upper * std_dev
|
|
1561
|
+
valid = (pixel_values >= lower_bound) & (pixel_values <= upper_bound)
|
|
1562
|
+
for f in range(num_frames):
|
|
1563
|
+
rej_mask[f, i, j, c] = not valid[f]
|
|
1564
|
+
valid_vals = pixel_values[valid]
|
|
1565
|
+
if valid_vals.size > 0:
|
|
1566
|
+
clipped[i, j, c] = np.mean(valid_vals)
|
|
1567
|
+
else:
|
|
1568
|
+
clipped[i, j, c] = median_val
|
|
1569
|
+
return clipped, rej_mask
|
|
1570
|
+
|
|
1571
|
+
|
|
1572
|
+
def windsorized_sigma_clip(stack, lower=2.5, upper=2.5):
|
|
1573
|
+
"""
|
|
1574
|
+
Dispatcher function that calls either the 3D or 4D specialized Numba function,
|
|
1575
|
+
depending on 'stack.ndim'.
|
|
1576
|
+
"""
|
|
1577
|
+
if stack.ndim == 3:
|
|
1578
|
+
return windsorized_sigma_clip_3d(stack, lower, upper)
|
|
1579
|
+
elif stack.ndim == 4:
|
|
1580
|
+
return windsorized_sigma_clip_4d(stack, lower, upper)
|
|
1581
|
+
else:
|
|
1582
|
+
raise ValueError(f"windsorized_sigma_clip: stack must be 3D or 4D, got {stack.shape}")
|
|
1583
|
+
|
|
1584
|
+
def max_value_stack(stack, weights=None):
|
|
1585
|
+
"""
|
|
1586
|
+
Stacking by taking the maximum value along the frame axis.
|
|
1587
|
+
Returns (clipped, rejection_mask) for compatibility:
|
|
1588
|
+
- clipped: H×W (or H×W×C)
|
|
1589
|
+
- rejection_mask: same shape as stack, all False
|
|
1590
|
+
"""
|
|
1591
|
+
clipped = np.max(stack, axis=0)
|
|
1592
|
+
rej_mask = np.zeros(stack.shape, dtype=bool)
|
|
1593
|
+
return clipped, rej_mask
|
|
1594
|
+
|
|
1595
|
+
@njit(parallel=True)
|
|
1596
|
+
def subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal):
|
|
1597
|
+
"""
|
|
1598
|
+
For mono stack:
|
|
1599
|
+
frames.shape == (F,H,W)
|
|
1600
|
+
dark_frame.shape == (H,W)
|
|
1601
|
+
Adds 'pedestal' after subtracting dark_frame from each frame.
|
|
1602
|
+
Returns the same shape (F,H,W).
|
|
1603
|
+
"""
|
|
1604
|
+
num_frames, height, width = frames.shape
|
|
1605
|
+
result = np.empty_like(frames, dtype=np.float32)
|
|
1606
|
+
|
|
1607
|
+
# Validate dark_frame shape
|
|
1608
|
+
if dark_frame.ndim != 2 or dark_frame.shape != (height, width):
|
|
1609
|
+
raise ValueError(
|
|
1610
|
+
"subtract_dark_with_pedestal_3d: for 3D frames, dark_frame must be 2D (H,W)"
|
|
1611
|
+
)
|
|
1612
|
+
|
|
1613
|
+
for i in prange(num_frames):
|
|
1614
|
+
for y in range(height):
|
|
1615
|
+
for x in range(width):
|
|
1616
|
+
result[i, y, x] = frames[i, y, x] - dark_frame[y, x] + pedestal
|
|
1617
|
+
|
|
1618
|
+
return result
|
|
1619
|
+
|
|
1620
|
+
@njit(parallel=True)
|
|
1621
|
+
def subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal):
|
|
1622
|
+
"""
|
|
1623
|
+
For color stack:
|
|
1624
|
+
frames.shape == (F,H,W,C)
|
|
1625
|
+
dark_frame.shape == (H,W,C)
|
|
1626
|
+
Adds 'pedestal' after subtracting dark_frame from each frame.
|
|
1627
|
+
Returns the same shape (F,H,W,C).
|
|
1628
|
+
"""
|
|
1629
|
+
num_frames, height, width, channels = frames.shape
|
|
1630
|
+
result = np.empty_like(frames, dtype=np.float32)
|
|
1631
|
+
|
|
1632
|
+
# Validate dark_frame shape
|
|
1633
|
+
if dark_frame.ndim != 3 or dark_frame.shape != (height, width, channels):
|
|
1634
|
+
raise ValueError(
|
|
1635
|
+
"subtract_dark_with_pedestal_4d: for 4D frames, dark_frame must be 3D (H,W,C)"
|
|
1636
|
+
)
|
|
1637
|
+
|
|
1638
|
+
for i in prange(num_frames):
|
|
1639
|
+
for y in range(height):
|
|
1640
|
+
for x in range(width):
|
|
1641
|
+
for c in range(channels):
|
|
1642
|
+
result[i, y, x, c] = frames[i, y, x, c] - dark_frame[y, x, c] + pedestal
|
|
1643
|
+
|
|
1644
|
+
return result
|
|
1645
|
+
|
|
1646
|
+
def subtract_dark_with_pedestal(frames, dark_frame, pedestal):
|
|
1647
|
+
"""
|
|
1648
|
+
Dispatcher function that calls either the 3D or 4D specialized Numba function
|
|
1649
|
+
depending on 'frames.ndim'.
|
|
1650
|
+
"""
|
|
1651
|
+
if frames.ndim == 3:
|
|
1652
|
+
return subtract_dark_with_pedestal_3d(frames, dark_frame, pedestal)
|
|
1653
|
+
elif frames.ndim == 4:
|
|
1654
|
+
return subtract_dark_with_pedestal_4d(frames, dark_frame, pedestal)
|
|
1655
|
+
else:
|
|
1656
|
+
raise ValueError(
|
|
1657
|
+
f"subtract_dark_with_pedestal: frames must be 3D or 4D, got {frames.shape}"
|
|
1658
|
+
)
|
|
1659
|
+
|
|
1660
|
+
|
|
1661
|
+
@njit(parallel=True, fastmath=True, cache=True)
|
|
1662
|
+
def _parallel_measure_frames_stack(stack): # stack: float32[N,H,W] or float32[N,H,W,C]
|
|
1663
|
+
n = stack.shape[0]
|
|
1664
|
+
means = np.empty(n, np.float32)
|
|
1665
|
+
for i in prange(n):
|
|
1666
|
+
# Option A: mean then cast
|
|
1667
|
+
# m = np.mean(stack[i])
|
|
1668
|
+
# means[i] = np.float32(m)
|
|
1669
|
+
|
|
1670
|
+
# Option B (often a hair faster): sum / size then cast
|
|
1671
|
+
s = np.sum(stack[i]) # no kwargs
|
|
1672
|
+
means[i] = np.float32(s / stack[i].size)
|
|
1673
|
+
return means
|
|
1674
|
+
|
|
1675
|
+
def parallel_measure_frames(images_py):
|
|
1676
|
+
a = [np.ascontiguousarray(x, dtype=np.float32) for x in images_py]
|
|
1677
|
+
a = [x[:, :, None] if x.ndim == 2 else x for x in a]
|
|
1678
|
+
stack = np.ascontiguousarray(np.stack(a, axis=0)) # (N,H,W,C)
|
|
1679
|
+
return _parallel_measure_frames_stack(stack)
|
|
1680
|
+
|
|
1681
|
+
@njit(fastmath=True)
|
|
1682
|
+
def fast_mad(image):
|
|
1683
|
+
""" Computes the Median Absolute Deviation (MAD) as a robust noise estimator. """
|
|
1684
|
+
flat_image = image.ravel() # ✅ Flatten the 2D array into 1D
|
|
1685
|
+
median_val = np.median(flat_image) # Compute median
|
|
1686
|
+
mad = np.median(np.abs(flat_image - median_val)) # Compute MAD
|
|
1687
|
+
return mad * 1.4826 # ✅ Scale MAD to match standard deviation (for Gaussian noise)
|
|
1688
|
+
|
|
1689
|
+
|
|
1690
|
+
|
|
1691
|
+
@njit(fastmath=True)
|
|
1692
|
+
def compute_snr(image):
|
|
1693
|
+
""" Computes the Signal-to-Noise Ratio (SNR) using fast Numba std. """
|
|
1694
|
+
mean_signal = np.mean(image)
|
|
1695
|
+
noise = compute_noise(image)
|
|
1696
|
+
return mean_signal / noise if noise > 0 else 0
|
|
1697
|
+
|
|
1698
|
+
|
|
1699
|
+
|
|
1700
|
+
|
|
1701
|
+
@njit(fastmath=True)
|
|
1702
|
+
def compute_noise(image):
|
|
1703
|
+
""" Estimates noise using Median Absolute Deviation (MAD). """
|
|
1704
|
+
return fast_mad(image)
|
|
1705
|
+
|
|
1706
|
+
def _downsample_for_stars(img: np.ndarray, factor: int = 4) -> np.ndarray:
|
|
1707
|
+
"""
|
|
1708
|
+
Very cheap spatial downsample for star counting.
|
|
1709
|
+
Works on mono or RGB. Returns float32 2D.
|
|
1710
|
+
"""
|
|
1711
|
+
if img.ndim == 3 and img.shape[-1] == 3:
|
|
1712
|
+
# luma first
|
|
1713
|
+
r, g, b = img[..., 0], img[..., 1], img[..., 2]
|
|
1714
|
+
img = 0.2126*r + 0.7152*g + 0.0722*b
|
|
1715
|
+
img = np.asarray(img, dtype=np.float32, order="C")
|
|
1716
|
+
if factor <= 1:
|
|
1717
|
+
return img
|
|
1718
|
+
# stride (fast & cache friendly), not interpolation
|
|
1719
|
+
return img[::factor, ::factor]
|
|
1720
|
+
|
|
1721
|
+
|
|
1722
|
+
def fast_star_count_lite(img: np.ndarray,
|
|
1723
|
+
sample_stride: int = 8,
|
|
1724
|
+
localmax_k: int = 3,
|
|
1725
|
+
thr_sigma: float = 4.0,
|
|
1726
|
+
max_ecc_samples: int = 200) -> tuple[int, float]:
|
|
1727
|
+
"""
|
|
1728
|
+
Super-fast star counter:
|
|
1729
|
+
• sample a tiny subset to estimate background mean/std
|
|
1730
|
+
• local-maxima on small image
|
|
1731
|
+
• optional rough eccentricity on a small random subset
|
|
1732
|
+
Returns (count, avg_ecc).
|
|
1733
|
+
"""
|
|
1734
|
+
# img is 2D float32, already downsampled
|
|
1735
|
+
H, W = img.shape
|
|
1736
|
+
# 1) quick background stats on a sparse grid
|
|
1737
|
+
samp = img[::sample_stride, ::sample_stride]
|
|
1738
|
+
mu = float(np.mean(samp))
|
|
1739
|
+
sigma = float(np.std(samp))
|
|
1740
|
+
thr = mu + thr_sigma * max(sigma, 1e-6)
|
|
1741
|
+
|
|
1742
|
+
# 2) find local maxima above threshold
|
|
1743
|
+
# small structuring element; k must be odd
|
|
1744
|
+
k = localmax_k if (localmax_k % 2 == 1) else (localmax_k + 1)
|
|
1745
|
+
se = np.ones((k, k), np.uint8)
|
|
1746
|
+
# dilate the image (on float -> do it via cv2.dilate after scaling)
|
|
1747
|
+
# scale to 16-bit to keep numeric fidelity (cheap)
|
|
1748
|
+
scaled = (img * (65535.0 / max(np.max(img), 1e-6))).astype(np.uint16)
|
|
1749
|
+
dil = cv2.dilate(scaled, se)
|
|
1750
|
+
# peaks are pixels that equal the local max and exceed thr
|
|
1751
|
+
peaks = (scaled == dil) & (img > thr)
|
|
1752
|
+
count = int(np.count_nonzero(peaks))
|
|
1753
|
+
|
|
1754
|
+
# 3) (optional) rough eccentricity on a tiny subset
|
|
1755
|
+
if count == 0:
|
|
1756
|
+
return 0, 0.0
|
|
1757
|
+
if max_ecc_samples <= 0:
|
|
1758
|
+
return count, 0.0
|
|
1759
|
+
|
|
1760
|
+
ys, xs = np.where(peaks)
|
|
1761
|
+
if xs.size > max_ecc_samples:
|
|
1762
|
+
idx = np.random.choice(xs.size, max_ecc_samples, replace=False)
|
|
1763
|
+
xs, ys = xs[idx], ys[idx]
|
|
1764
|
+
|
|
1765
|
+
ecc_vals = []
|
|
1766
|
+
# small window around each peak
|
|
1767
|
+
r = 2 # 5×5 window
|
|
1768
|
+
for x, y in zip(xs, ys):
|
|
1769
|
+
x0, x1 = max(0, x - r), min(W, x + r + 1)
|
|
1770
|
+
y0, y1 = max(0, y - r), min(H, y + r + 1)
|
|
1771
|
+
patch = img[y0:y1, x0:x1]
|
|
1772
|
+
if patch.size < 9:
|
|
1773
|
+
continue
|
|
1774
|
+
# second moments for ellipse approximation
|
|
1775
|
+
yy, xx = np.mgrid[y0:y1, x0:x1]
|
|
1776
|
+
yy = yy.astype(np.float32) - y
|
|
1777
|
+
xx = xx.astype(np.float32) - x
|
|
1778
|
+
w = patch - patch.min()
|
|
1779
|
+
s = float(w.sum())
|
|
1780
|
+
if s <= 0:
|
|
1781
|
+
continue
|
|
1782
|
+
mxx = float((w * (xx*xx)).sum() / s)
|
|
1783
|
+
myy = float((w * (yy*yy)).sum() / s)
|
|
1784
|
+
# approximate major/minor from variances
|
|
1785
|
+
a = math.sqrt(max(mxx, myy))
|
|
1786
|
+
b = math.sqrt(min(mxx, myy))
|
|
1787
|
+
if a > 1e-6:
|
|
1788
|
+
e = math.sqrt(max(0.0, 1.0 - (b*b)/(a*a)))
|
|
1789
|
+
ecc_vals.append(e)
|
|
1790
|
+
avg_ecc = float(np.mean(ecc_vals)) if ecc_vals else 0.0
|
|
1791
|
+
return count, avg_ecc
|
|
1792
|
+
|
|
1793
|
+
|
|
1794
|
+
|
|
1795
|
+
def compute_star_count_fast_preview(preview_2d: np.ndarray) -> tuple[int, float]:
|
|
1796
|
+
"""
|
|
1797
|
+
Wrapper used in measurement: downsample aggressively and run the lite counter.
|
|
1798
|
+
"""
|
|
1799
|
+
tiny = _downsample_for_stars(preview_2d, factor=4) # try 4–8 depending on your sensor
|
|
1800
|
+
return fast_star_count_lite(tiny, sample_stride=8, localmax_k=3, thr_sigma=4.0, max_ecc_samples=120)
|
|
1801
|
+
|
|
1802
|
+
|
|
1803
|
+
|
|
1804
|
+
def compute_star_count(image):
|
|
1805
|
+
"""Fast star detection with robust pre-stretch for linear data."""
|
|
1806
|
+
return fast_star_count(image)
|
|
1807
|
+
|
|
1808
|
+
def fast_star_count(
|
|
1809
|
+
image,
|
|
1810
|
+
blur_size=None, # adaptive if None
|
|
1811
|
+
threshold_factor=0.8,
|
|
1812
|
+
min_area=None, # adaptive if None
|
|
1813
|
+
max_area=None, # adaptive if None
|
|
1814
|
+
*,
|
|
1815
|
+
gamma=0.45, # <1 brightens faint signal; 0.35–0.55 is a good range
|
|
1816
|
+
p_lo=0.1, # robust low percentile for stretch
|
|
1817
|
+
p_hi=99.8 # robust high percentile for stretch
|
|
1818
|
+
):
|
|
1819
|
+
"""
|
|
1820
|
+
Estimate star count + avg eccentricity from a 2D float/uint8 image.
|
|
1821
|
+
Now does robust percentile stretch + gamma in float BEFORE 8-bit/Otsu.
|
|
1822
|
+
"""
|
|
1823
|
+
# 1) Ensure 2D grayscale (stay float32)
|
|
1824
|
+
if image.ndim == 3:
|
|
1825
|
+
# RGB -> luma
|
|
1826
|
+
r, g, b = image[..., 0], image[..., 1], image[..., 2]
|
|
1827
|
+
img = (0.2126 * r + 0.7152 * g + 0.0722 * b).astype(np.float32, copy=False)
|
|
1828
|
+
else:
|
|
1829
|
+
img = np.asarray(image, dtype=np.float32, order="C")
|
|
1830
|
+
|
|
1831
|
+
H, W = img.shape[:2]
|
|
1832
|
+
short_side = max(1, min(H, W))
|
|
1833
|
+
|
|
1834
|
+
# Adaptive params
|
|
1835
|
+
if blur_size is None:
|
|
1836
|
+
k = max(3, int(round(short_side / 80)))
|
|
1837
|
+
blur_size = k if (k % 2 == 1) else (k + 1)
|
|
1838
|
+
if min_area is None:
|
|
1839
|
+
min_area = 1
|
|
1840
|
+
if max_area is None:
|
|
1841
|
+
max_area = max(100, int(0.01 * H * W))
|
|
1842
|
+
|
|
1843
|
+
# 2) Robust percentile stretch in float (no 8-bit yet)
|
|
1844
|
+
# This lifts the sky background and pulls faint stars up before thresholding.
|
|
1845
|
+
lo = float(np.percentile(img, p_lo))
|
|
1846
|
+
hi = float(np.percentile(img, p_hi))
|
|
1847
|
+
if not (hi > lo):
|
|
1848
|
+
lo, hi = float(img.min()), float(img.max())
|
|
1849
|
+
if not (hi > lo):
|
|
1850
|
+
return 0, 0.0
|
|
1851
|
+
|
|
1852
|
+
norm = (img - lo) / max(1e-8, (hi - lo))
|
|
1853
|
+
norm = np.clip(norm, 0.0, 1.0)
|
|
1854
|
+
|
|
1855
|
+
# 3) Gamma (<1 brightens low end)
|
|
1856
|
+
if gamma and gamma > 0:
|
|
1857
|
+
norm = np.power(norm, gamma, dtype=np.float32)
|
|
1858
|
+
|
|
1859
|
+
# 4) Convert to 8-bit ONLY after stretch/gamma (preserves faint structure)
|
|
1860
|
+
image_8u = (norm * 255.0).astype(np.uint8)
|
|
1861
|
+
|
|
1862
|
+
# 5) Blur + subtract (unsharp-ish)
|
|
1863
|
+
blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
|
|
1864
|
+
sub = cv2.absdiff(image_8u, blurred)
|
|
1865
|
+
|
|
1866
|
+
# 6) Otsu + threshold_factor
|
|
1867
|
+
otsu, _ = cv2.threshold(sub, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
|
|
1868
|
+
thr = max(2, int(otsu * threshold_factor))
|
|
1869
|
+
_, mask = cv2.threshold(sub, thr, 255, cv2.THRESH_BINARY)
|
|
1870
|
+
|
|
1871
|
+
# 7) Morph open *only* on larger frames (tiny frames lose stars otherwise)
|
|
1872
|
+
if short_side >= 600:
|
|
1873
|
+
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((2, 2), np.uint8))
|
|
1874
|
+
|
|
1875
|
+
# 8) Contours → area filter → eccentricity
|
|
1876
|
+
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
1877
|
+
|
|
1878
|
+
star_count = 0
|
|
1879
|
+
ecc_values = []
|
|
1880
|
+
for c in contours:
|
|
1881
|
+
area = cv2.contourArea(c)
|
|
1882
|
+
if area < min_area or area > max_area:
|
|
1883
|
+
continue
|
|
1884
|
+
if len(c) < 5:
|
|
1885
|
+
continue
|
|
1886
|
+
(_, _), (a, b), _ = cv2.fitEllipse(c)
|
|
1887
|
+
if b > a: a, b = b, a
|
|
1888
|
+
if a > 0:
|
|
1889
|
+
e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a)))
|
|
1890
|
+
else:
|
|
1891
|
+
e = 0.0
|
|
1892
|
+
ecc_values.append(e)
|
|
1893
|
+
star_count += 1
|
|
1894
|
+
|
|
1895
|
+
# 9) Gentle fallback if too few detections: lower threshold & smaller blur
|
|
1896
|
+
if star_count < 5:
|
|
1897
|
+
k2 = max(3, (blur_size // 2) | 1)
|
|
1898
|
+
blurred2 = cv2.GaussianBlur(image_8u, (k2, k2), 0)
|
|
1899
|
+
sub2 = cv2.absdiff(image_8u, blurred2)
|
|
1900
|
+
otsu2, _ = cv2.threshold(sub2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
|
|
1901
|
+
thr2 = max(2, int(otsu2 * 0.6)) # more permissive
|
|
1902
|
+
_, mask2 = cv2.threshold(sub2, thr2, 255, cv2.THRESH_BINARY)
|
|
1903
|
+
contours2, _ = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
1904
|
+
star_count = 0
|
|
1905
|
+
ecc_values = []
|
|
1906
|
+
for c in contours2:
|
|
1907
|
+
area = cv2.contourArea(c)
|
|
1908
|
+
if area < 1 or area > max_area:
|
|
1909
|
+
continue
|
|
1910
|
+
if len(c) < 5:
|
|
1911
|
+
continue
|
|
1912
|
+
(_, _), (a, b), _ = cv2.fitEllipse(c)
|
|
1913
|
+
if b > a: a, b = b, a
|
|
1914
|
+
e = math.sqrt(max(0.0, 1.0 - (b * b) / (a * a))) if a > 0 else 0.0
|
|
1915
|
+
ecc_values.append(e)
|
|
1916
|
+
star_count += 1
|
|
1917
|
+
|
|
1918
|
+
avg_ecc = float(np.mean(ecc_values)) if star_count > 0 else 0.0
|
|
1919
|
+
return star_count, avg_ecc
|
|
1920
|
+
|
|
1921
|
+
@njit(parallel=True, fastmath=True)
|
|
1922
|
+
def normalize_images_3d(stack, ref_median):
|
|
1923
|
+
"""
|
|
1924
|
+
Normalizes each frame in a 3D mono stack (F,H,W)
|
|
1925
|
+
so that its median equals ref_median.
|
|
1926
|
+
|
|
1927
|
+
Returns a 3D result (F,H,W).
|
|
1928
|
+
"""
|
|
1929
|
+
num_frames, height, width = stack.shape
|
|
1930
|
+
normalized_stack = np.zeros_like(stack, dtype=np.float32)
|
|
1931
|
+
|
|
1932
|
+
for i in prange(num_frames):
|
|
1933
|
+
# shape of one frame: (H,W)
|
|
1934
|
+
img = stack[i]
|
|
1935
|
+
img_median = np.median(img)
|
|
1936
|
+
|
|
1937
|
+
# Prevent division by zero
|
|
1938
|
+
scale_factor = ref_median / max(img_median, 1e-6)
|
|
1939
|
+
# Scale the entire 2D frame
|
|
1940
|
+
normalized_stack[i] = img * scale_factor
|
|
1941
|
+
|
|
1942
|
+
return normalized_stack
|
|
1943
|
+
|
|
1944
|
+
@njit(parallel=True, fastmath=True)
|
|
1945
|
+
def normalize_images_4d(stack, ref_median):
|
|
1946
|
+
"""
|
|
1947
|
+
Normalizes each frame in a 4D color stack (F,H,W,C)
|
|
1948
|
+
so that its median equals ref_median.
|
|
1949
|
+
|
|
1950
|
+
Returns a 4D result (F,H,W,C).
|
|
1951
|
+
"""
|
|
1952
|
+
num_frames, height, width, channels = stack.shape
|
|
1953
|
+
normalized_stack = np.zeros_like(stack, dtype=np.float32)
|
|
1954
|
+
|
|
1955
|
+
for i in prange(num_frames):
|
|
1956
|
+
# shape of one frame: (H,W,C)
|
|
1957
|
+
img = stack[i] # (H,W,C)
|
|
1958
|
+
# Flatten to 1D to compute median across all channels/pixels
|
|
1959
|
+
img_median = np.median(img.ravel())
|
|
1960
|
+
|
|
1961
|
+
# Prevent division by zero
|
|
1962
|
+
scale_factor = ref_median / max(img_median, 1e-6)
|
|
1963
|
+
|
|
1964
|
+
# Scale the entire 3D frame
|
|
1965
|
+
for y in range(height):
|
|
1966
|
+
for x in range(width):
|
|
1967
|
+
for c in range(channels):
|
|
1968
|
+
normalized_stack[i, y, x, c] = img[y, x, c] * scale_factor
|
|
1969
|
+
|
|
1970
|
+
return normalized_stack
|
|
1971
|
+
|
|
1972
|
+
def normalize_images(stack, ref_median):
|
|
1973
|
+
"""
|
|
1974
|
+
Dispatcher that calls either the 3D or 4D specialized Numba function
|
|
1975
|
+
depending on 'stack.ndim'.
|
|
1976
|
+
|
|
1977
|
+
- If stack.ndim == 3, we assume shape (F,H,W).
|
|
1978
|
+
- If stack.ndim == 4, we assume shape (F,H,W,C).
|
|
1979
|
+
"""
|
|
1980
|
+
if stack.ndim == 3:
|
|
1981
|
+
return normalize_images_3d(stack, ref_median)
|
|
1982
|
+
elif stack.ndim == 4:
|
|
1983
|
+
return normalize_images_4d(stack, ref_median)
|
|
1984
|
+
else:
|
|
1985
|
+
raise ValueError(f"normalize_images: stack must be 3D or 4D, got shape {stack.shape}")
|
|
1986
|
+
|
|
1987
|
+
@njit(parallel=True, fastmath=True)
|
|
1988
|
+
def _bilinear_interpolate_numba(out):
|
|
1989
|
+
H, W, C = out.shape
|
|
1990
|
+
for c in range(C):
|
|
1991
|
+
for y in prange(H):
|
|
1992
|
+
for x in range(W):
|
|
1993
|
+
if out[y, x, c] == 0:
|
|
1994
|
+
sumv = 0.0
|
|
1995
|
+
cnt = 0
|
|
1996
|
+
# 3x3 neighborhood average of non-zero samples (simple & fast)
|
|
1997
|
+
for dy in (-1, 0, 1):
|
|
1998
|
+
yy = y + dy
|
|
1999
|
+
if yy < 0 or yy >= H:
|
|
2000
|
+
continue
|
|
2001
|
+
for dx in (-1, 0, 1):
|
|
2002
|
+
xx = x + dx
|
|
2003
|
+
if xx < 0 or xx >= W:
|
|
2004
|
+
continue
|
|
2005
|
+
v = out[yy, xx, c]
|
|
2006
|
+
if v != 0:
|
|
2007
|
+
sumv += v
|
|
2008
|
+
cnt += 1
|
|
2009
|
+
if cnt > 0:
|
|
2010
|
+
out[y, x, c] = sumv / cnt
|
|
2011
|
+
return out
|
|
2012
|
+
|
|
2013
|
+
|
|
2014
|
+
@njit(parallel=True, fastmath=True)
|
|
2015
|
+
def _edge_aware_interpolate_numba(out):
|
|
2016
|
+
"""
|
|
2017
|
+
For each pixel in out (shape: (H,W,3)) where out[y,x,c] == 0,
|
|
2018
|
+
use a simple edge-aware approach:
|
|
2019
|
+
1) Compute horizontal gradient = abs( left - right )
|
|
2020
|
+
2) Compute vertical gradient = abs( top - bottom )
|
|
2021
|
+
3) Choose the direction with the smaller gradient => average neighbors
|
|
2022
|
+
4) If neighbors are missing or zero, fallback to a small 3x3 average
|
|
2023
|
+
|
|
2024
|
+
This is simpler than AHD but usually better than naive bilinear
|
|
2025
|
+
for high-contrast features like star cores.
|
|
2026
|
+
"""
|
|
2027
|
+
H, W, C = out.shape
|
|
2028
|
+
|
|
2029
|
+
for c in range(C):
|
|
2030
|
+
for y in prange(H):
|
|
2031
|
+
for x in range(W):
|
|
2032
|
+
if out[y, x, c] == 0:
|
|
2033
|
+
# Gather immediate neighbors
|
|
2034
|
+
left = 0.0
|
|
2035
|
+
right = 0.0
|
|
2036
|
+
top = 0.0
|
|
2037
|
+
bottom = 0.0
|
|
2038
|
+
have_left = False
|
|
2039
|
+
have_right = False
|
|
2040
|
+
have_top = False
|
|
2041
|
+
have_bottom = False
|
|
2042
|
+
|
|
2043
|
+
# Left
|
|
2044
|
+
if x - 1 >= 0:
|
|
2045
|
+
val = out[y, x - 1, c]
|
|
2046
|
+
if val != 0:
|
|
2047
|
+
left = val
|
|
2048
|
+
have_left = True
|
|
2049
|
+
|
|
2050
|
+
# Right
|
|
2051
|
+
if x + 1 < W:
|
|
2052
|
+
val = out[y, x + 1, c]
|
|
2053
|
+
if val != 0:
|
|
2054
|
+
right = val
|
|
2055
|
+
have_right = True
|
|
2056
|
+
|
|
2057
|
+
# Top
|
|
2058
|
+
if y - 1 >= 0:
|
|
2059
|
+
val = out[y - 1, x, c]
|
|
2060
|
+
if val != 0:
|
|
2061
|
+
top = val
|
|
2062
|
+
have_top = True
|
|
2063
|
+
|
|
2064
|
+
# Bottom
|
|
2065
|
+
if y + 1 < H:
|
|
2066
|
+
val = out[y + 1, x, c]
|
|
2067
|
+
if val != 0:
|
|
2068
|
+
bottom = val
|
|
2069
|
+
have_bottom = True
|
|
2070
|
+
|
|
2071
|
+
# Compute gradients
|
|
2072
|
+
# If we don't have valid neighbors for that direction,
|
|
2073
|
+
# set the gradient to a large number => won't be chosen
|
|
2074
|
+
gh = 1e6
|
|
2075
|
+
gv = 1e6
|
|
2076
|
+
|
|
2077
|
+
if have_left and have_right:
|
|
2078
|
+
gh = abs(left - right)
|
|
2079
|
+
if have_top and have_bottom:
|
|
2080
|
+
gv = abs(top - bottom)
|
|
2081
|
+
|
|
2082
|
+
# Decide which direction to interpolate
|
|
2083
|
+
if gh < gv and have_left and have_right:
|
|
2084
|
+
# Horizontal interpolation
|
|
2085
|
+
out[y, x, c] = 0.5 * (left + right)
|
|
2086
|
+
elif gv <= gh and have_top and have_bottom:
|
|
2087
|
+
# Vertical interpolation
|
|
2088
|
+
out[y, x, c] = 0.5 * (top + bottom)
|
|
2089
|
+
else:
|
|
2090
|
+
# Fallback: average 3×3 region
|
|
2091
|
+
sumv = 0.0
|
|
2092
|
+
count = 0
|
|
2093
|
+
for dy in range(-1, 2):
|
|
2094
|
+
for dx in range(-1, 2):
|
|
2095
|
+
yy = y + dy
|
|
2096
|
+
xx = x + dx
|
|
2097
|
+
if 0 <= yy < H and 0 <= xx < W:
|
|
2098
|
+
val = out[yy, xx, c]
|
|
2099
|
+
if val != 0:
|
|
2100
|
+
sumv += val
|
|
2101
|
+
count += 1
|
|
2102
|
+
if count > 0:
|
|
2103
|
+
out[y, x, c] = sumv / count
|
|
2104
|
+
|
|
2105
|
+
return out
|
|
2106
|
+
# === Separate Full-Resolution Demosaicing Kernels ===
|
|
2107
|
+
# These njit functions assume the raw image is arranged in a Bayer pattern
|
|
2108
|
+
# and that we want a full (H,W,3) output.
|
|
2109
|
+
|
|
2110
|
+
@njit(parallel=True, fastmath=True)
|
|
2111
|
+
def debayer_RGGB_fullres_fast(image, interpolate=True):
|
|
2112
|
+
H, W = image.shape
|
|
2113
|
+
out = np.zeros((H, W, 3), dtype=image.dtype)
|
|
2114
|
+
for y in prange(H):
|
|
2115
|
+
for x in range(W):
|
|
2116
|
+
if (y & 1) == 0:
|
|
2117
|
+
if (x & 1) == 0: out[y, x, 0] = image[y, x] # R
|
|
2118
|
+
else: out[y, x, 1] = image[y, x] # G
|
|
2119
|
+
else:
|
|
2120
|
+
if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
|
|
2121
|
+
else: out[y, x, 2] = image[y, x] # B
|
|
2122
|
+
if interpolate:
|
|
2123
|
+
_edge_aware_interpolate_numba(out)
|
|
2124
|
+
return out
|
|
2125
|
+
|
|
2126
|
+
@njit(parallel=True, fastmath=True)
|
|
2127
|
+
def debayer_BGGR_fullres_fast(image, interpolate=True):
|
|
2128
|
+
H, W = image.shape
|
|
2129
|
+
out = np.zeros((H, W, 3), dtype=image.dtype)
|
|
2130
|
+
for y in prange(H):
|
|
2131
|
+
for x in range(W):
|
|
2132
|
+
if (y & 1) == 0:
|
|
2133
|
+
if (x & 1) == 0: out[y, x, 2] = image[y, x] # B
|
|
2134
|
+
else: out[y, x, 1] = image[y, x] # G
|
|
2135
|
+
else:
|
|
2136
|
+
if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
|
|
2137
|
+
else: out[y, x, 0] = image[y, x] # R
|
|
2138
|
+
if interpolate:
|
|
2139
|
+
_edge_aware_interpolate_numba(out)
|
|
2140
|
+
return out
|
|
2141
|
+
|
|
2142
|
+
@njit(parallel=True, fastmath=True)
|
|
2143
|
+
def debayer_GRBG_fullres_fast(image, interpolate=True):
|
|
2144
|
+
H, W = image.shape
|
|
2145
|
+
out = np.zeros((H, W, 3), dtype=image.dtype)
|
|
2146
|
+
for y in prange(H):
|
|
2147
|
+
for x in range(W):
|
|
2148
|
+
if (y & 1) == 0:
|
|
2149
|
+
if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
|
|
2150
|
+
else: out[y, x, 0] = image[y, x] # R
|
|
2151
|
+
else:
|
|
2152
|
+
if (x & 1) == 0: out[y, x, 2] = image[y, x] # B
|
|
2153
|
+
else: out[y, x, 1] = image[y, x] # G
|
|
2154
|
+
if interpolate:
|
|
2155
|
+
_edge_aware_interpolate_numba(out)
|
|
2156
|
+
return out
|
|
2157
|
+
|
|
2158
|
+
@njit(parallel=True, fastmath=True)
|
|
2159
|
+
def debayer_GBRG_fullres_fast(image, interpolate=True):
|
|
2160
|
+
H, W = image.shape
|
|
2161
|
+
out = np.zeros((H, W, 3), dtype=image.dtype)
|
|
2162
|
+
for y in prange(H):
|
|
2163
|
+
for x in range(W):
|
|
2164
|
+
if (y & 1) == 0:
|
|
2165
|
+
if (x & 1) == 0: out[y, x, 1] = image[y, x] # G
|
|
2166
|
+
else: out[y, x, 2] = image[y, x] # B
|
|
2167
|
+
else:
|
|
2168
|
+
if (x & 1) == 0: out[y, x, 0] = image[y, x] # R
|
|
2169
|
+
else: out[y, x, 1] = image[y, x] # G
|
|
2170
|
+
if interpolate:
|
|
2171
|
+
_edge_aware_interpolate_numba(out)
|
|
2172
|
+
return out
|
|
2173
|
+
|
|
2174
|
+
# === Python-Level Dispatch Function ===
|
|
2175
|
+
# Since Numba cannot easily compare strings in nopython mode,
|
|
2176
|
+
# we do the if/elif check here in Python and then call the appropriate njit function.
|
|
2177
|
+
|
|
2178
|
+
def debayer_fits_fast(image_data, bayer_pattern, cfa_drizzle=False, method="edge"):
|
|
2179
|
+
bp = (bayer_pattern or "").upper()
|
|
2180
|
+
interpolate = not cfa_drizzle
|
|
2181
|
+
|
|
2182
|
+
# 1) lay down the known samples per CFA
|
|
2183
|
+
if bp == 'RGGB':
|
|
2184
|
+
out = debayer_RGGB_fullres_fast(image_data, interpolate=False)
|
|
2185
|
+
elif bp == 'BGGR':
|
|
2186
|
+
out = debayer_BGGR_fullres_fast(image_data, interpolate=False)
|
|
2187
|
+
elif bp == 'GRBG':
|
|
2188
|
+
out = debayer_GRBG_fullres_fast(image_data, interpolate=False)
|
|
2189
|
+
elif bp == 'GBRG':
|
|
2190
|
+
out = debayer_GBRG_fullres_fast(image_data, interpolate=False)
|
|
2191
|
+
else:
|
|
2192
|
+
raise ValueError(f"Unsupported Bayer pattern: {bayer_pattern}")
|
|
2193
|
+
|
|
2194
|
+
# 2) perform interpolation unless doing CFA-drizzle
|
|
2195
|
+
if interpolate:
|
|
2196
|
+
m = (method or "edge").lower()
|
|
2197
|
+
if m == "edge":
|
|
2198
|
+
_edge_aware_interpolate_numba(out)
|
|
2199
|
+
elif m == "bilinear":
|
|
2200
|
+
_bilinear_interpolate_numba(out)
|
|
2201
|
+
else:
|
|
2202
|
+
# fallback to edge-aware if unknown
|
|
2203
|
+
_edge_aware_interpolate_numba(out)
|
|
2204
|
+
|
|
2205
|
+
return out
|
|
2206
|
+
|
|
2207
|
+
def debayer_raw_fast(raw_image_data, bayer_pattern="RGGB", cfa_drizzle=False, method="edge"):
|
|
2208
|
+
return debayer_fits_fast(raw_image_data, bayer_pattern, cfa_drizzle=cfa_drizzle, method=method)
|
|
2209
|
+
|
|
2210
|
+
@njit(parallel=True, fastmath=True)
|
|
2211
|
+
def applyPixelMath_numba(image_array, amount):
|
|
2212
|
+
factor = 3 ** amount
|
|
2213
|
+
denom_factor = 3 ** amount - 1
|
|
2214
|
+
height, width, channels = image_array.shape
|
|
2215
|
+
output = np.empty_like(image_array, dtype=np.float32)
|
|
2216
|
+
|
|
2217
|
+
for y in prange(height):
|
|
2218
|
+
for x in prange(width):
|
|
2219
|
+
for c in prange(channels):
|
|
2220
|
+
val = (factor * image_array[y, x, c]) / (denom_factor * image_array[y, x, c] + 1)
|
|
2221
|
+
output[y, x, c] = min(max(val, 0.0), 1.0) # Equivalent to np.clip()
|
|
2222
|
+
|
|
2223
|
+
return output
|
|
2224
|
+
|
|
2225
|
+
@njit(parallel=True, fastmath=True)
|
|
2226
|
+
def adjust_saturation_numba(image_array, saturation_factor):
|
|
2227
|
+
height, width, channels = image_array.shape
|
|
2228
|
+
output = np.empty_like(image_array, dtype=np.float32)
|
|
2229
|
+
|
|
2230
|
+
for y in prange(int(height)): # Ensure y is an integer
|
|
2231
|
+
for x in prange(int(width)): # Ensure x is an integer
|
|
2232
|
+
r, g, b = image_array[int(y), int(x)] # Force integer indexing
|
|
2233
|
+
|
|
2234
|
+
# Convert RGB to HSV manually
|
|
2235
|
+
max_val = max(r, g, b)
|
|
2236
|
+
min_val = min(r, g, b)
|
|
2237
|
+
delta = max_val - min_val
|
|
2238
|
+
|
|
2239
|
+
# Compute Hue (H)
|
|
2240
|
+
if delta == 0:
|
|
2241
|
+
h = 0
|
|
2242
|
+
elif max_val == r:
|
|
2243
|
+
h = (60 * ((g - b) / delta) + 360) % 360
|
|
2244
|
+
elif max_val == g:
|
|
2245
|
+
h = (60 * ((b - r) / delta) + 120) % 360
|
|
2246
|
+
else:
|
|
2247
|
+
h = (60 * ((r - g) / delta) + 240) % 360
|
|
2248
|
+
|
|
2249
|
+
# Compute Saturation (S)
|
|
2250
|
+
s = (delta / max_val) if max_val != 0 else 0
|
|
2251
|
+
s *= saturation_factor # Apply saturation adjustment
|
|
2252
|
+
s = min(max(s, 0.0), 1.0) # Clip saturation
|
|
2253
|
+
|
|
2254
|
+
# Convert back to RGB
|
|
2255
|
+
if s == 0:
|
|
2256
|
+
r, g, b = max_val, max_val, max_val
|
|
2257
|
+
else:
|
|
2258
|
+
c = s * max_val
|
|
2259
|
+
x_val = c * (1 - abs((h / 60) % 2 - 1))
|
|
2260
|
+
m = max_val - c
|
|
2261
|
+
|
|
2262
|
+
if 0 <= h < 60:
|
|
2263
|
+
r, g, b = c, x_val, 0
|
|
2264
|
+
elif 60 <= h < 120:
|
|
2265
|
+
r, g, b = x_val, c, 0
|
|
2266
|
+
elif 120 <= h < 180:
|
|
2267
|
+
r, g, b = 0, c, x_val
|
|
2268
|
+
elif 180 <= h < 240:
|
|
2269
|
+
r, g, b = 0, x_val, c
|
|
2270
|
+
elif 240 <= h < 300:
|
|
2271
|
+
r, g, b = x_val, 0, c
|
|
2272
|
+
else:
|
|
2273
|
+
r, g, b = c, 0, x_val
|
|
2274
|
+
|
|
2275
|
+
r, g, b = r + m, g + m, b + m # Add m to shift brightness
|
|
2276
|
+
|
|
2277
|
+
# ✅ Fix: Explicitly cast indices to integers
|
|
2278
|
+
output[int(y), int(x), 0] = r
|
|
2279
|
+
output[int(y), int(x), 1] = g
|
|
2280
|
+
output[int(y), int(x), 2] = b
|
|
2281
|
+
|
|
2282
|
+
return output
|
|
2283
|
+
|
|
2284
|
+
|
|
2285
|
+
|
|
2286
|
+
|
|
2287
|
+
@njit(parallel=True, fastmath=True)
|
|
2288
|
+
def applySCNR_numba(image_array):
|
|
2289
|
+
height, width, _ = image_array.shape
|
|
2290
|
+
output = np.empty_like(image_array, dtype=np.float32)
|
|
2291
|
+
|
|
2292
|
+
for y in prange(int(height)):
|
|
2293
|
+
for x in prange(int(width)):
|
|
2294
|
+
r, g, b = image_array[y, x]
|
|
2295
|
+
g = min(g, (r + b) / 2) # Reduce green to the average of red & blue
|
|
2296
|
+
|
|
2297
|
+
# ✅ Fix: Assign channels individually instead of a tuple
|
|
2298
|
+
output[int(y), int(x), 0] = r
|
|
2299
|
+
output[int(y), int(x), 1] = g
|
|
2300
|
+
output[int(y), int(x), 2] = b
|
|
2301
|
+
|
|
2302
|
+
|
|
2303
|
+
return output
|
|
2304
|
+
|
|
2305
|
+
# D65 reference
|
|
2306
|
+
_Xn, _Yn, _Zn = 0.95047, 1.00000, 1.08883
|
|
2307
|
+
|
|
2308
|
+
# Matrix for RGB -> XYZ (sRGB => D65)
|
|
2309
|
+
_M_rgb2xyz = np.array([
|
|
2310
|
+
[0.4124564, 0.3575761, 0.1804375],
|
|
2311
|
+
[0.2126729, 0.7151522, 0.0721750],
|
|
2312
|
+
[0.0193339, 0.1191920, 0.9503041]
|
|
2313
|
+
], dtype=np.float32)
|
|
2314
|
+
|
|
2315
|
+
# Matrix for XYZ -> RGB (sRGB => D65)
|
|
2316
|
+
_M_xyz2rgb = np.array([
|
|
2317
|
+
[ 3.2404542, -1.5371385, -0.4985314],
|
|
2318
|
+
[-0.9692660, 1.8760108, 0.0415560],
|
|
2319
|
+
[ 0.0556434, -0.2040259, 1.0572252]
|
|
2320
|
+
], dtype=np.float32)
|
|
2321
|
+
|
|
2322
|
+
|
|
2323
|
+
|
|
2324
|
+
@njit(parallel=True, fastmath=True)
|
|
2325
|
+
def apply_lut_gray(image_in, lut):
|
|
2326
|
+
"""
|
|
2327
|
+
Numba-accelerated application of 'lut' to a single-channel image_in in [0..1].
|
|
2328
|
+
'lut' is a 1D array of shape (size,) also in [0..1].
|
|
2329
|
+
"""
|
|
2330
|
+
out = np.empty_like(image_in)
|
|
2331
|
+
height, width = image_in.shape
|
|
2332
|
+
size_lut = len(lut) - 1
|
|
2333
|
+
|
|
2334
|
+
for y in prange(height):
|
|
2335
|
+
for x in range(width):
|
|
2336
|
+
v = image_in[y, x]
|
|
2337
|
+
idx = int(v * size_lut + 0.5)
|
|
2338
|
+
if idx < 0: idx = 0
|
|
2339
|
+
elif idx > size_lut: idx = size_lut
|
|
2340
|
+
out[y, x] = lut[idx]
|
|
2341
|
+
|
|
2342
|
+
return out
|
|
2343
|
+
|
|
2344
|
+
@njit(parallel=True, fastmath=True)
|
|
2345
|
+
def apply_lut_color(image_in, lut):
|
|
2346
|
+
"""
|
|
2347
|
+
Numba-accelerated application of 'lut' to a 3-channel image_in in [0..1].
|
|
2348
|
+
'lut' is a 1D array of shape (size,) also in [0..1].
|
|
2349
|
+
"""
|
|
2350
|
+
out = np.empty_like(image_in)
|
|
2351
|
+
height, width, channels = image_in.shape
|
|
2352
|
+
size_lut = len(lut) - 1
|
|
2353
|
+
|
|
2354
|
+
for y in prange(height):
|
|
2355
|
+
for x in range(width):
|
|
2356
|
+
for c in range(channels):
|
|
2357
|
+
v = image_in[y, x, c]
|
|
2358
|
+
idx = int(v * size_lut + 0.5)
|
|
2359
|
+
if idx < 0: idx = 0
|
|
2360
|
+
elif idx > size_lut: idx = size_lut
|
|
2361
|
+
out[y, x, c] = lut[idx]
|
|
2362
|
+
|
|
2363
|
+
return out
|
|
2364
|
+
|
|
2365
|
+
@njit(parallel=True, fastmath=True)
|
|
2366
|
+
def apply_lut_mono_inplace(array2d, lut):
|
|
2367
|
+
"""
|
|
2368
|
+
In-place LUT application on a single-channel 2D array in [0..1].
|
|
2369
|
+
'lut' has shape (size,) also in [0..1].
|
|
2370
|
+
"""
|
|
2371
|
+
H, W = array2d.shape
|
|
2372
|
+
size_lut = len(lut) - 1
|
|
2373
|
+
for y in prange(H):
|
|
2374
|
+
for x in prange(W):
|
|
2375
|
+
v = array2d[y, x]
|
|
2376
|
+
idx = int(v * size_lut + 0.5)
|
|
2377
|
+
if idx < 0:
|
|
2378
|
+
idx = 0
|
|
2379
|
+
elif idx > size_lut:
|
|
2380
|
+
idx = size_lut
|
|
2381
|
+
array2d[y, x] = lut[idx]
|
|
2382
|
+
|
|
2383
|
+
@njit(parallel=True, fastmath=True)
|
|
2384
|
+
def apply_lut_color_inplace(array3d, lut):
|
|
2385
|
+
"""
|
|
2386
|
+
In-place LUT application on a 3-channel array in [0..1].
|
|
2387
|
+
'lut' has shape (size,) also in [0..1].
|
|
2388
|
+
"""
|
|
2389
|
+
H, W, C = array3d.shape
|
|
2390
|
+
size_lut = len(lut) - 1
|
|
2391
|
+
for y in prange(H):
|
|
2392
|
+
for x in prange(W):
|
|
2393
|
+
for c in range(C):
|
|
2394
|
+
v = array3d[y, x, c]
|
|
2395
|
+
idx = int(v * size_lut + 0.5)
|
|
2396
|
+
if idx < 0:
|
|
2397
|
+
idx = 0
|
|
2398
|
+
elif idx > size_lut:
|
|
2399
|
+
idx = size_lut
|
|
2400
|
+
array3d[y, x, c] = lut[idx]
|
|
2401
|
+
|
|
2402
|
+
@njit(parallel=True, fastmath=True)
|
|
2403
|
+
def rgb_to_xyz_numba(rgb):
|
|
2404
|
+
"""
|
|
2405
|
+
Convert an image from sRGB to XYZ (D65).
|
|
2406
|
+
rgb: float32 array in [0..1], shape (H,W,3)
|
|
2407
|
+
returns xyz in [0..maybe >1], shape (H,W,3)
|
|
2408
|
+
"""
|
|
2409
|
+
H, W, _ = rgb.shape
|
|
2410
|
+
out = np.empty((H, W, 3), dtype=np.float32)
|
|
2411
|
+
for y in prange(H):
|
|
2412
|
+
for x in prange(W):
|
|
2413
|
+
r = rgb[y, x, 0]
|
|
2414
|
+
g = rgb[y, x, 1]
|
|
2415
|
+
b = rgb[y, x, 2]
|
|
2416
|
+
# Multiply by M_rgb2xyz
|
|
2417
|
+
X = _M_rgb2xyz[0,0]*r + _M_rgb2xyz[0,1]*g + _M_rgb2xyz[0,2]*b
|
|
2418
|
+
Y = _M_rgb2xyz[1,0]*r + _M_rgb2xyz[1,1]*g + _M_rgb2xyz[1,2]*b
|
|
2419
|
+
Z = _M_rgb2xyz[2,0]*r + _M_rgb2xyz[2,1]*g + _M_rgb2xyz[2,2]*b
|
|
2420
|
+
out[y, x, 0] = X
|
|
2421
|
+
out[y, x, 1] = Y
|
|
2422
|
+
out[y, x, 2] = Z
|
|
2423
|
+
return out
|
|
2424
|
+
|
|
2425
|
+
@njit(parallel=True, fastmath=True)
|
|
2426
|
+
def xyz_to_rgb_numba(xyz):
|
|
2427
|
+
"""
|
|
2428
|
+
Convert an image from XYZ (D65) to sRGB.
|
|
2429
|
+
xyz: float32 array, shape (H,W,3)
|
|
2430
|
+
returns rgb in [0..1], shape (H,W,3)
|
|
2431
|
+
"""
|
|
2432
|
+
H, W, _ = xyz.shape
|
|
2433
|
+
out = np.empty((H, W, 3), dtype=np.float32)
|
|
2434
|
+
for y in prange(H):
|
|
2435
|
+
for x in prange(W):
|
|
2436
|
+
X = xyz[y, x, 0]
|
|
2437
|
+
Y = xyz[y, x, 1]
|
|
2438
|
+
Z = xyz[y, x, 2]
|
|
2439
|
+
# Multiply by M_xyz2rgb
|
|
2440
|
+
r = _M_xyz2rgb[0,0]*X + _M_xyz2rgb[0,1]*Y + _M_xyz2rgb[0,2]*Z
|
|
2441
|
+
g = _M_xyz2rgb[1,0]*X + _M_xyz2rgb[1,1]*Y + _M_xyz2rgb[1,2]*Z
|
|
2442
|
+
b = _M_xyz2rgb[2,0]*X + _M_xyz2rgb[2,1]*Y + _M_xyz2rgb[2,2]*Z
|
|
2443
|
+
# Clip to [0..1]
|
|
2444
|
+
if r < 0: r = 0
|
|
2445
|
+
elif r > 1: r = 1
|
|
2446
|
+
if g < 0: g = 0
|
|
2447
|
+
elif g > 1: g = 1
|
|
2448
|
+
if b < 0: b = 0
|
|
2449
|
+
elif b > 1: b = 1
|
|
2450
|
+
out[y, x, 0] = r
|
|
2451
|
+
out[y, x, 1] = g
|
|
2452
|
+
out[y, x, 2] = b
|
|
2453
|
+
return out
|
|
2454
|
+
|
|
2455
|
+
@njit
def f_lab_numba(t):
    delta = 6/29
    out = np.empty_like(t, dtype=np.float32)
    for i in range(t.size):
        val = t.flat[i]
        if val > delta**3:
            out.flat[i] = val**(1/3)
        else:
            out.flat[i] = val/(3*delta*delta) + (4/29)
    return out

@njit(parallel=True, fastmath=True)
def xyz_to_lab_numba(xyz):
    """
    xyz => shape(H,W,3), in D65.
    returns lab in shape(H,W,3): L in [0..100], a,b in ~[-128..127].
    """
    H, W, _ = xyz.shape
    out = np.empty((H,W,3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            X = xyz[y, x, 0] / _Xn
            Y = xyz[y, x, 1] / _Yn
            Z = xyz[y, x, 2] / _Zn
            fx = (X)**(1/3) if X > (6/29)**3 else X/(3*(6/29)**2) + 4/29
            fy = (Y)**(1/3) if Y > (6/29)**3 else Y/(3*(6/29)**2) + 4/29
            fz = (Z)**(1/3) if Z > (6/29)**3 else Z/(3*(6/29)**2) + 4/29
            L = 116*fy - 16
            a = 500*(fx - fy)
            b = 200*(fy - fz)
            out[y, x, 0] = L
            out[y, x, 1] = a
            out[y, x, 2] = b
    return out

@njit(parallel=True, fastmath=True)
def lab_to_xyz_numba(lab):
    """
    lab => shape(H,W,3): L in [0..100], a,b in ~[-128..127].
    returns xyz shape(H,W,3).
    """
    H, W, _ = lab.shape
    out = np.empty((H,W,3), dtype=np.float32)
    delta = 6/29
    for y in prange(H):
        for x in prange(W):
            L = lab[y, x, 0]
            a = lab[y, x, 1]
            b = lab[y, x, 2]
            fy = (L+16)/116
            fx = fy + a/500
            fz = fy - b/200

            if fx > delta:
                xr = fx**3
            else:
                xr = 3*delta*delta*(fx - 4/29)
            if fy > delta:
                yr = fy**3
            else:
                yr = 3*delta*delta*(fy - 4/29)
            if fz > delta:
                zr = fz**3
            else:
                zr = 3*delta*delta*(fz - 4/29)

            X = _Xn * xr
            Y = _Yn * yr
            Z = _Zn * zr
            out[y, x, 0] = X
            out[y, x, 1] = Y
            out[y, x, 2] = Z
    return out

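# Illustrative round trip (editor's sketch, assuming the helpers above are in scope):
# RGB -> XYZ -> Lab and back again.
import numpy as np

rgb_demo = np.random.rand(64, 64, 3).astype(np.float32)
lab_demo = xyz_to_lab_numba(rgb_to_xyz_numba(rgb_demo))
rgb_back = xyz_to_rgb_numba(lab_to_xyz_numba(lab_demo))
# rgb_back should match rgb_demo to within float32 rounding for in-gamut pixels.
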
@njit(parallel=True, fastmath=True)
def rgb_to_hsv_numba(rgb):
    H, W, _ = rgb.shape
    out = np.empty((H,W,3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            r = rgb[y,x,0]
            g = rgb[y,x,1]
            b = rgb[y,x,2]
            cmax = max(r,g,b)
            cmin = min(r,g,b)
            delta = cmax - cmin
            # Hue
            h = 0.0
            if delta != 0.0:
                if cmax == r:
                    h = 60*(((g-b)/delta) % 6)
                elif cmax == g:
                    h = 60*(((b-r)/delta) + 2)
                else:
                    h = 60*(((r-g)/delta) + 4)
            # Saturation
            s = 0.0
            if cmax > 0.0:
                s = delta / cmax
            v = cmax
            out[y,x,0] = h
            out[y,x,1] = s
            out[y,x,2] = v
    return out

@njit(parallel=True, fastmath=True)
def hsv_to_rgb_numba(hsv):
    H, W, _ = hsv.shape
    out = np.empty((H,W,3), dtype=np.float32)
    for y in prange(H):
        for x in prange(W):
            h = hsv[y,x,0]
            s = hsv[y,x,1]
            v = hsv[y,x,2]
            c = v*s
            hh = (h/60.0) % 6
            x_ = c*(1 - abs(hh % 2 - 1))
            m = v - c
            r = 0.0
            g = 0.0
            b = 0.0
            if 0 <= hh < 1:
                r,g,b = c,x_,0
            elif 1 <= hh < 2:
                r,g,b = x_,c,0
            elif 2 <= hh < 3:
                r,g,b = 0,c,x_
            elif 3 <= hh < 4:
                r,g,b = 0,x_,c
            elif 4 <= hh < 5:
                r,g,b = x_,0,c
            else:
                r,g,b = c,0,x_
            out[y,x,0] = (r + m)
            out[y,x,1] = (g + m)
            out[y,x,2] = (b + m)
    return out

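# Illustrative saturation tweak (editor's sketch) using the HSV helpers above; the
# 1.25 boost factor is an arbitrary example value. Hue is in degrees, S and V in [0..1].
import numpy as np

img_hsv_demo = np.random.rand(64, 64, 3).astype(np.float32)
hsv_demo = rgb_to_hsv_numba(img_hsv_demo)
hsv_demo[..., 1] = np.clip(hsv_demo[..., 1] * 1.25, 0.0, 1.0)   # boost saturation by 25%
boosted = hsv_to_rgb_numba(hsv_demo)
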
@njit(parallel=True, fastmath=True)
def _cosmetic_correction_core(src, dst, H, W, C,
                              hot_sigma, cold_sigma,
                              star_mean_ratio,   # e.g. 0.18..0.30
                              star_max_ratio,    # e.g. 0.45..0.65
                              sat_threshold,     # absolute cutoff in src units
                              cold_cluster_max   # max # of neighbors below low before we skip
                              ):
    """
    Read from src, write to dst. Center is EXCLUDED from stats.
    Star guard: if ring mean or ring max are a decent fraction of center, skip (likely a PSF).
    Cold guard: if many neighbors are also low, skip (structure/shadow, not a dead pixel).
    """
    for y in prange(1, H-1):
        # scratch ring buffer allocated per row so parallel iterations do not share it
        local_vals = np.empty(8, dtype=np.float32)
        for x in range(1, W-1):
            for c in range(C if src.ndim == 3 else 1):
                # gather 8-neighbor ring (no center)
                k = 0
                ring_sum = 0.0
                ring_max = -1e30
                for dy in (-1, 0, 1):
                    for dx in (-1, 0, 1):
                        if dy == 0 and dx == 0:
                            continue
                        if src.ndim == 3:
                            v = src[y+dy, x+dx, c]
                        else:
                            v = src[y+dy, x+dx]
                        local_vals[k] = v
                        ring_sum += v
                        if v > ring_max:
                            ring_max = v
                        k += 1

                # median and MAD from ring only
                M = np.median(local_vals)
                abs_devs = np.empty(8, dtype=np.float32)
                for i in range(8):
                    abs_devs[i] = abs(local_vals[i] - M)
                MAD = np.median(abs_devs)
                sigma = 1.4826 * MAD + 1e-8  # epsilon guard

                # center
                T = src[y, x, c] if src.ndim == 3 else src[y, x]

                # saturation guard
                if T >= sat_threshold:
                    if src.ndim == 3: dst[y, x, c] = T
                    else: dst[y, x] = T
                    continue

                high = M + hot_sigma * sigma
                low = M - cold_sigma * sigma

                replace = False

                if T > high:
                    # Star guard for HOT: neighbors should not form a footprint
                    ring_mean = ring_sum / 8.0
                    if (ring_mean / (T + 1e-8) < star_mean_ratio) and (ring_max / (T + 1e-8) < star_max_ratio):
                        replace = True
                elif T < low:
                    # Cold pixel: only if it's isolated (few neighbors also low)
                    count_below = 0
                    for i in range(8):
                        if local_vals[i] < low:
                            count_below += 1
                    if count_below <= cold_cluster_max:
                        replace = True

                if replace:
                    if src.ndim == 3: dst[y, x, c] = M
                    else: dst[y, x] = M
                else:
                    if src.ndim == 3: dst[y, x, c] = T
                    else: dst[y, x] = T

def bulk_cosmetic_correction_numba(image,
                                   hot_sigma=5.0,
                                   cold_sigma=5.0,
                                   star_mean_ratio=0.22,
                                   star_max_ratio=0.55,
                                   sat_quantile=0.9995):
    """
    Star-safe cosmetic correction for 2D (mono) or 3D (RGB) arrays.
    Reads from the original, writes to a new array (two-pass).
    - star_mean_ratio: how large neighbor mean must be vs center to *skip* (PSF)
    - star_max_ratio : how large neighbor max must be vs center to *skip* (PSF)
    - sat_quantile   : top quantile to protect from edits (bright cores)
    """
    img = image.astype(np.float32, copy=False)
    was_gray = (img.ndim == 2)
    if was_gray:
        src = img[:, :, None]
    else:
        src = img

    H, W, C = src.shape
    dst = src.copy()

    # per-channel saturation guards
    sat_thresholds = np.empty(C, dtype=np.float32)
    for ci in range(C):
        plane = src[:, :, ci]
        # Compute in Python (Numba doesn't support np.quantile well)
        sat_thresholds[ci] = float(np.quantile(plane, sat_quantile))

    # run per-channel to use per-channel saturation
    for ci in range(C):
        _cosmetic_correction_core(src[:, :, ci], dst[:, :, ci],
                                  H, W, 1,
                                  float(hot_sigma), float(cold_sigma),
                                  float(star_mean_ratio), float(star_max_ratio),
                                  float(sat_thresholds[ci]),
                                  1)  # cold_cluster_max: allow 1 neighbor to be low

    if was_gray:
        return dst[:, :, 0]
    return dst

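# Usage sketch (editor's addition): run the star-safe correction on a normalized frame.
# The random array stands in for a calibrated image in [0..1]; the parameters shown are
# the function's own defaults. For raw CFA data, use the Bayer-aware variant defined next.
import numpy as np

frame_demo = np.random.rand(1024, 1024).astype(np.float32)
clean_mono = bulk_cosmetic_correction_numba(frame_demo, hot_sigma=5.0, cold_sigma=5.0)
clean_cfa = bulk_cosmetic_correction_bayer(frame_demo, pattern="RGGB")
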
def bulk_cosmetic_correction_bayer(image,
                                   hot_sigma=5.5,
                                   cold_sigma=5.0,
                                   star_mean_ratio=0.22,
                                   star_max_ratio=0.55,
                                   sat_quantile=0.9995,
                                   pattern="RGGB"):
    """
    Bayer-safe cosmetic correction. Work on same-color sub-planes (2-px stride),
    then write results back. Defaults assume normalized or 16/32f data.
    """
    H, W = image.shape
    corrected = image.astype(np.float32).copy()

    if pattern.upper() not in ("RGGB", "BGGR", "GRBG", "GBRG"):
        pattern = "RGGB"

    # index maps for each CFA pattern (row0,col0 offsets)
    if pattern.upper() == "RGGB":
        r0, c0 = 0, 0
        g1r, g1c = 0, 1
        g2r, g2c = 1, 0
        b0, b0c = 1, 1
    elif pattern.upper() == "BGGR":
        r0, c0 = 1, 1
        g1r, g1c = 1, 0
        g2r, g2c = 0, 1
        b0, b0c = 0, 0
    elif pattern.upper() == "GRBG":
        r0, c0 = 0, 1
        g1r, g1c = 0, 0
        g2r, g2c = 1, 1
        b0, b0c = 1, 0
    else:  # GBRG
        r0, c0 = 1, 0
        g1r, g1c = 0, 0
        g2r, g2c = 1, 1
        b0, b0c = 0, 1

    # helper to process a same-color plane view
    def _process_plane(view):
        return bulk_cosmetic_correction_numba(
            view,
            hot_sigma=hot_sigma,
            cold_sigma=cold_sigma,
            star_mean_ratio=star_mean_ratio,
            star_max_ratio=star_max_ratio,
            sat_quantile=sat_quantile
        )

    # Red
    red = corrected[r0:H:2, c0:W:2]
    corrected[r0:H:2, c0:W:2] = _process_plane(red)

    # Blue
    blue = corrected[b0:H:2, b0c:W:2]
    corrected[b0:H:2, b0c:W:2] = _process_plane(blue)

    # Greens
    g1 = corrected[g1r:H:2, g1c:W:2]
    corrected[g1r:H:2, g1c:W:2] = _process_plane(g1)

    g2 = corrected[g2r:H:2, g2c:W:2]
    corrected[g2r:H:2, g2c:W:2] = _process_plane(g2)

    return corrected


def evaluate_polynomial(H: int, W: int, coeffs: np.ndarray, degree: int) -> np.ndarray:
    """
    Evaluates the polynomial function over the entire image domain.
    """
    xx, yy = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing="xy")
    A_full = build_poly_terms(xx.ravel(), yy.ravel(), degree)
    return (A_full @ coeffs).reshape(H, W)

@njit(parallel=True, fastmath=True)
def numba_mono_final_formula(rescaled, median_rescaled, target_median):
    """
    Applies the final formula *after* we already have the rescaled values.

    rescaled[y,x] = (original[y,x] - black_point) / (1 - black_point)
    median_rescaled = median(rescaled)

    out_val = ((median_rescaled - 1) * target_median * r) /
              (median_rescaled*(target_median + r - 1) - target_median*r)
    """
    H, W = rescaled.shape
    out = np.empty_like(rescaled)

    for y in prange(H):
        for x in range(W):
            r = rescaled[y, x]
            numer = (median_rescaled - 1.0) * target_median * r
            denom = median_rescaled * (target_median + r - 1.0) - target_median * r
            if np.abs(denom) < 1e-12:
                denom = 1e-12
            out[y, x] = numer / denom

    return out

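# Usage sketch (editor's addition) for the mono stretch above. The black-point choice
# (a low percentile) and target_median=0.25 are illustrative values, not package defaults.
import numpy as np

lin_demo = np.random.rand(512, 512).astype(np.float32)
black_point = float(np.percentile(lin_demo, 0.1))
rescaled_demo = (lin_demo - black_point) / (1.0 - black_point)
median_rescaled_demo = float(np.median(rescaled_demo))
stretched_demo = numba_mono_final_formula(rescaled_demo, median_rescaled_demo, 0.25)
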
@njit(parallel=True, fastmath=True)
def numba_color_final_formula_linked(rescaled, median_rescaled, target_median):
    """
    Linked color transform: we use one median_rescaled for all channels.
    rescaled: (H,W,3), already = (image - black_point)/(1 - black_point)
    median_rescaled = median of *all* pixels in rescaled
    """
    H, W, C = rescaled.shape
    out = np.empty_like(rescaled)

    for y in prange(H):
        for x in range(W):
            for c in range(C):
                r = rescaled[y, x, c]
                numer = (median_rescaled - 1.0) * target_median * r
                denom = median_rescaled * (target_median + r - 1.0) - target_median * r
                if np.abs(denom) < 1e-12:
                    denom = 1e-12
                out[y, x, c] = numer / denom

    return out

@njit(parallel=True, fastmath=True)
def numba_color_final_formula_unlinked(rescaled, medians_rescaled, target_median):
    """
    Unlinked color transform: a separate median_rescaled per channel.
    rescaled: (H,W,3), where each channel is already (val - black_point[c]) / (1 - black_point[c])
    medians_rescaled: shape (3,) with median of each channel in the rescaled array.
    """
    H, W, C = rescaled.shape
    out = np.empty_like(rescaled)

    for y in prange(H):
        for x in range(W):
            for c in range(C):
                r = rescaled[y, x, c]
                med = medians_rescaled[c]
                numer = (med - 1.0) * target_median * r
                denom = med * (target_median + r - 1.0) - target_median * r
                if np.abs(denom) < 1e-12:
                    denom = 1e-12
                out[y, x, c] = numer / denom

    return out


def build_poly_terms(x_array: np.ndarray, y_array: np.ndarray, degree: int) -> np.ndarray:
    """
    Precomputes polynomial basis terms efficiently using NumPy, supporting up to degree 6.
    """
    ones = np.ones_like(x_array, dtype=np.float32)

    if degree == 1:
        return np.column_stack((ones, x_array, y_array))

    elif degree == 2:
        return np.column_stack((ones, x_array, y_array,
                                x_array**2, x_array * y_array, y_array**2))

    elif degree == 3:
        return np.column_stack((ones, x_array, y_array,
                                x_array**2, x_array * y_array, y_array**2,
                                x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3))

    elif degree == 4:
        return np.column_stack((ones, x_array, y_array,
                                x_array**2, x_array * y_array, y_array**2,
                                x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
                                x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4))

    elif degree == 5:
        return np.column_stack((ones, x_array, y_array,
                                x_array**2, x_array * y_array, y_array**2,
                                x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
                                x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4,
                                x_array**5, x_array**4 * y_array, x_array**3 * y_array**2, x_array**2 * y_array**3, x_array * y_array**4, y_array**5))

    elif degree == 6:
        return np.column_stack((ones, x_array, y_array,
                                x_array**2, x_array * y_array, y_array**2,
                                x_array**3, x_array**2 * y_array, x_array * y_array**2, y_array**3,
                                x_array**4, x_array**3 * y_array, x_array**2 * y_array**2, x_array * y_array**3, y_array**4,
                                x_array**5, x_array**4 * y_array, x_array**3 * y_array**2, x_array**2 * y_array**3, x_array * y_array**4, y_array**5,
                                x_array**6, x_array**5 * y_array, x_array**4 * y_array**2, x_array**3 * y_array**3, x_array**2 * y_array**4, x_array * y_array**5, y_array**6))

    else:
        raise ValueError(f"Unsupported polynomial degree={degree}. Max supported is 6.")

def generate_sample_points(image: np.ndarray, num_points: int = 100) -> np.ndarray:
    """
    Generates sample points uniformly across the image.

    - Places points in a uniform grid (no randomization).
    - Avoids border pixels.
    - Skips any points with value 0.000 or above 0.85.

    Returns:
        np.ndarray: Array of shape (N, 2) containing (x, y) coordinates of sample points.
    """
    H, W = image.shape[:2]
    points = []

    # Create a uniform grid (avoiding the border)
    grid_size = int(np.sqrt(num_points))  # Roughly equal spacing
    x_vals = np.linspace(10, W - 10, grid_size, dtype=int)  # Avoids border
    y_vals = np.linspace(10, H - 10, grid_size, dtype=int)

    for y in y_vals:
        for x in x_vals:
            # Skip values that are too dark (0.000) or too bright (> 0.85)
            if np.any(image[int(y), int(x)] == 0.000) or np.any(image[int(y), int(x)] > 0.85):
                continue  # Skip this pixel

            points.append((int(x), int(y)))

            if len(points) >= num_points:
                return np.array(points, dtype=np.int32)  # Return only valid points

    return np.array(points, dtype=np.int32)  # Return all collected points

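# Illustrative background-modelling sketch (editor's addition): sample the frame on a
# grid, fit a degree-2 surface with least squares, and evaluate it over the full frame
# using the helpers above. The np.linalg.lstsq step is an assumption about how these
# pieces combine, not code taken from the package; for higher degrees you would normally
# normalise the coordinates first.
import numpy as np

bg_img = np.random.rand(600, 800).astype(np.float32)
pts = generate_sample_points(bg_img, num_points=100)          # (N, 2) of (x, y)
xs = pts[:, 0].astype(np.float32)
ys = pts[:, 1].astype(np.float32)
vals = bg_img[pts[:, 1], pts[:, 0]].astype(np.float32)

A = build_poly_terms(xs, ys, 2)                               # (N, 6) design matrix
coeffs, *_ = np.linalg.lstsq(A, vals, rcond=None)
background = evaluate_polynomial(bg_img.shape[0], bg_img.shape[1], coeffs, 2)
flattened = bg_img - background + np.median(background)
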
@njit(parallel=True, fastmath=True)
def numba_unstretch(image: np.ndarray, stretch_original_medians: np.ndarray, stretch_original_mins: np.ndarray) -> np.ndarray:
    """
    Numba-optimized function to undo the unlinked stretch.
    Restores each channel separately.
    """
    H, W, C = image.shape
    out = np.empty_like(image, dtype=np.float32)

    for c in prange(C):  # Parallelize per channel
        cmed_stretched = np.median(image[..., c])
        orig_med = stretch_original_medians[c]
        orig_min = stretch_original_mins[c]

        if cmed_stretched != 0 and orig_med != 0:
            for y in prange(H):
                for x in range(W):
                    r = image[y, x, c]
                    numerator = (cmed_stretched - 1) * orig_med * r
                    denominator = cmed_stretched * (orig_med + r - 1) - orig_med * r
                    if denominator == 0:
                        denominator = 1e-6  # Avoid division by zero
                    out[y, x, c] = numerator / denominator
        else:
            # Degenerate medians: copy the channel through unchanged so 'out' is never
            # left uninitialized for this channel
            out[:, :, c] = image[:, :, c]

        # Restore the original black point
        out[..., c] += orig_min

    return np.clip(out, 0, 1)  # Clip to valid range

@njit(fastmath=True)
def drizzle_deposit_numba_naive(
    img_data,        # shape (H, W), mono
    transform,       # shape (2, 3), e.g. [[a,b,tx],[c,d,ty]]
    drizzle_buffer,  # shape (outH, outW)
    coverage_buffer, # shape (outH, outW)
    drizzle_factor: float,
    frame_weight: float
):
    """
    Naive deposit: each input pixel is mapped to exactly one output pixel,
    ignoring drop_shrink. 2D single-channel version (mono).
    """
    h, w = img_data.shape
    out_h, out_w = drizzle_buffer.shape

    # Build a 3×3 matrix M
    # transform is 2×3, so we expand to 3×3 for the standard [x, y, 1] approach
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]  # a
    M[0, 1] = transform[0, 1]  # b
    M[0, 2] = transform[0, 2]  # tx
    M[1, 0] = transform[1, 0]  # c
    M[1, 1] = transform[1, 1]  # d
    M[1, 2] = transform[1, 2]  # ty
    M[2, 2] = 1.0

    # We'll reuse a small input vector for each pixel
    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    for y in range(h):
        for x in range(w):
            val = img_data[y, x]
            if val == 0:
                continue

            # Fill the input vector
            in_coords[0] = x
            in_coords[1] = y

            # Multiply
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # Multiply by drizzle_factor
            Xo = int(X * drizzle_factor)
            Yo = int(Y * drizzle_factor)

            if 0 <= Xo < out_w and 0 <= Yo < out_h:
                drizzle_buffer[Yo, Xo] += val * frame_weight
                coverage_buffer[Yo, Xo] += frame_weight

    return drizzle_buffer, coverage_buffer

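# Usage sketch (editor's addition): deposit one mono frame into a 2x drizzle grid with
# an identity 2x3 alignment transform, then normalise by coverage using the
# finalize_drizzle_2d helper defined further below.
import numpy as np

frame_dz = np.random.rand(100, 100).astype(np.float32)
ident = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]], dtype=np.float32)
driz = np.zeros((200, 200), dtype=np.float32)
cov = np.zeros((200, 200), dtype=np.float32)
drizzle_deposit_numba_naive(frame_dz, ident, driz, cov, 2.0, 1.0)  # factor=2x, weight=1
result = finalize_drizzle_2d(driz, cov, np.empty_like(driz))
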
@njit(fastmath=True)
def drizzle_deposit_numba_footprint(
    img_data,        # shape (H, W), mono
    transform,       # shape (2, 3)
    drizzle_buffer,  # shape (outH, outW)
    coverage_buffer, # shape (outH, outW)
    drizzle_factor: float,
    drop_shrink: float,
    frame_weight: float
):
    """
    Distributes each input pixel over a bounding box of width=drop_shrink
    in the drizzle (out) plane. (Mono 2D version)
    """
    h, w = img_data.shape
    out_h, out_w = drizzle_buffer.shape

    # Build a 3×3 matrix M
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]  # a
    M[0, 1] = transform[0, 1]  # b
    M[0, 2] = transform[0, 2]  # tx
    M[1, 0] = transform[1, 0]  # c
    M[1, 1] = transform[1, 1]  # d
    M[1, 2] = transform[1, 2]  # ty
    M[2, 2] = 1.0

    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    footprint_radius = drop_shrink * 0.5

    for y in range(h):
        for x in range(w):
            val = img_data[y, x]
            if val == 0:
                continue

            # Transform to output coords
            in_coords[0] = x
            in_coords[1] = y
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # Upsample
            Xo = X * drizzle_factor
            Yo = Y * drizzle_factor

            # bounding box
            min_x = int(np.floor(Xo - footprint_radius))
            max_x = int(np.floor(Xo + footprint_radius))
            min_y = int(np.floor(Yo - footprint_radius))
            max_y = int(np.floor(Yo + footprint_radius))

            # clip
            if max_x < 0 or min_x >= out_w or max_y < 0 or min_y >= out_h:
                continue
            if min_x < 0:
                min_x = 0
            if max_x >= out_w:
                max_x = out_w - 1
            if min_y < 0:
                min_y = 0
            if max_y >= out_h:
                max_y = out_h - 1

            width_foot = (max_x - min_x + 1)
            height_foot = (max_y - min_y + 1)
            area_pixels = width_foot * height_foot
            if area_pixels <= 0:
                continue

            deposit_val = (val * frame_weight) / area_pixels
            coverage_fraction = frame_weight / area_pixels

            for oy in range(min_y, max_y+1):
                for ox in range(min_x, max_x+1):
                    drizzle_buffer[oy, ox] += deposit_val
                    coverage_buffer[oy, ox] += coverage_fraction

    return drizzle_buffer, coverage_buffer

@njit(fastmath=True)
def _drizzle_kernel_weights(kernel_code: int, Xo: float, Yo: float,
                            min_x: int, max_x: int, min_y: int, max_y: int,
                            sigma_out: float,
                            weights_out):  # preallocated 2D view (max_y-min_y+1, max_x-min_x+1)
    """
    Fill `weights_out` with unnormalized kernel weights centered at (Xo,Yo).
    Returns (sum_w, count_used).
    """
    H = max_y - min_y + 1
    W = max_x - min_x + 1
    r2_limit = sigma_out * sigma_out  # for circle, sigma_out := radius

    sum_w = 0.0
    cnt = 0
    for j in range(H):
        oy = min_y + j
        cy = (oy + 0.5) - Yo  # pixel-center distance
        for i in range(W):
            ox = min_x + i
            cx = (ox + 0.5) - Xo
            w = 0.0

            if kernel_code == 0:
                # square = uniform weight in the bounding box
                w = 1.0
            elif kernel_code == 1:
                # circle = uniform weight if inside radius
                if (cx*cx + cy*cy) <= r2_limit:
                    w = 1.0
            else:  # gaussian
                # gaussian centered at (Xo,Yo) with sigma_out
                z = (cx*cx + cy*cy) / (2.0 * sigma_out * sigma_out)
                # drop tiny far-away contributions to keep perf ok
                if z <= 9.0:  # ~3σ
                    w = math.exp(-z)

            weights_out[j, i] = w
            sum_w += w
            if w > 0.0:
                cnt += 1

    return sum_w, cnt

@njit(fastmath=True)
def drizzle_deposit_numba_kernel_mono(
    img_data, transform, drizzle_buffer, coverage_buffer,
    drizzle_factor: float, drop_shrink: float, frame_weight: float,
    kernel_code: int, gaussian_sigma_or_radius: float
):
    H, W = img_data.shape
    outH, outW = drizzle_buffer.shape

    # build 3x3
    M = np.zeros((3, 3), dtype=np.float32)
    M[0,0], M[0,1], M[0,2] = transform[0,0], transform[0,1], transform[0,2]
    M[1,0], M[1,1], M[1,2] = transform[1,0], transform[1,1], transform[1,2]
    M[2,2] = 1.0

    v = np.zeros(3, dtype=np.float32); v[2] = 1.0

    # interpret width parameter:
    # - square/circle: radius = drop_shrink * 0.5 (pixfrac-like)
    # - gaussian: sigma_out = max(gaussian_sigma_or_radius, drop_shrink * 0.5)
    radius = drop_shrink * 0.5
    sigma_out = gaussian_sigma_or_radius if kernel_code == 2 else radius
    if sigma_out < 1e-6:
        sigma_out = 1e-6

    # temp weights tile (safely sized later per pixel)
    for y in range(H):
        for x in range(W):
            val = img_data[y, x]
            if val == 0.0:
                continue

            v[0] = x; v[1] = y
            out_coords = M @ v
            Xo = out_coords[0] * drizzle_factor
            Yo = out_coords[1] * drizzle_factor

            # choose bounds
            if kernel_code == 2:
                r = int(math.ceil(3.0 * sigma_out))
            else:
                r = int(math.ceil(radius))

            if r <= 0:
                # degenerate → nearest pixel
                ox = int(Xo); oy = int(Yo)
                if 0 <= ox < outW and 0 <= oy < outH:
                    drizzle_buffer[oy, ox] += val * frame_weight
                    coverage_buffer[oy, ox] += frame_weight
                continue

            min_x = int(math.floor(Xo - r))
            max_x = int(math.floor(Xo + r))
            min_y = int(math.floor(Yo - r))
            max_y = int(math.floor(Yo + r))
            if max_x < 0 or min_x >= outW or max_y < 0 or min_y >= outH:
                continue
            if min_x < 0: min_x = 0
            if min_y < 0: min_y = 0
            if max_x >= outW: max_x = outW - 1
            if max_y >= outH: max_y = outH - 1

            Ht = max_y - min_y + 1
            Wt = max_x - min_x + 1
            if Ht <= 0 or Wt <= 0:
                continue

            # allocate small tile (Numba-friendly: fixed-size via stack array)
            weights = np.zeros((Ht, Wt), dtype=np.float32)
            sum_w, cnt = _drizzle_kernel_weights(kernel_code, Xo, Yo,
                                                 min_x, max_x, min_y, max_y,
                                                 sigma_out, weights)
            if cnt == 0 or sum_w <= 1e-12:
                # fallback to nearest
                ox = int(Xo); oy = int(Yo)
                if 0 <= ox < outW and 0 <= oy < outH:
                    drizzle_buffer[oy, ox] += val * frame_weight
                    coverage_buffer[oy, ox] += frame_weight
                continue

            scale = (val * frame_weight) / sum_w
            cov_scale = frame_weight / sum_w
            for j in range(Ht):
                oy = min_y + j
                for i in range(Wt):
                    w = weights[j, i]
                    if w > 0.0:
                        ox = min_x + i
                        drizzle_buffer[oy, ox] += w * scale
                        coverage_buffer[oy, ox] += w * cov_scale

    return drizzle_buffer, coverage_buffer

@njit(fastmath=True)
def drizzle_deposit_color_kernel(
    img_data, transform, drizzle_buffer, coverage_buffer,
    drizzle_factor: float, drop_shrink: float, frame_weight: float,
    kernel_code: int, gaussian_sigma_or_radius: float
):
    H, W, C = img_data.shape
    outH, outW, _ = drizzle_buffer.shape

    M = np.zeros((3, 3), dtype=np.float32)
    M[0,0], M[0,1], M[0,2] = transform[0,0], transform[0,1], transform[0,2]
    M[1,0], M[1,1], M[1,2] = transform[1,0], transform[1,1], transform[1,2]
    M[2,2] = 1.0

    v = np.zeros(3, dtype=np.float32); v[2] = 1.0

    radius = drop_shrink * 0.5
    sigma_out = gaussian_sigma_or_radius if kernel_code == 2 else radius
    if sigma_out < 1e-6:
        sigma_out = 1e-6

    for y in range(H):
        for x in range(W):
            # (minor optimization) skip all-zero triplets
            nz = False
            for cc in range(C):
                if img_data[y, x, cc] != 0.0:
                    nz = True; break
            if not nz:
                continue

            v[0] = x; v[1] = y
            out_coords = M @ v
            Xo = out_coords[0] * drizzle_factor
            Yo = out_coords[1] * drizzle_factor

            if kernel_code == 2:
                r = int(math.ceil(3.0 * sigma_out))
            else:
                r = int(math.ceil(radius))

            if r <= 0:
                ox = int(Xo); oy = int(Yo)
                if 0 <= ox < outW and 0 <= oy < outH:
                    for c in range(C):
                        val = img_data[y, x, c]
                        if val != 0.0:
                            drizzle_buffer[oy, ox, c] += val * frame_weight
                            coverage_buffer[oy, ox, c] += frame_weight
                continue

            min_x = int(math.floor(Xo - r))
            max_x = int(math.floor(Xo + r))
            min_y = int(math.floor(Yo - r))
            max_y = int(math.floor(Yo + r))
            if max_x < 0 or min_x >= outW or max_y < 0 or min_y >= outH:
                continue
            if min_x < 0: min_x = 0
            if min_y < 0: min_y = 0
            if max_x >= outW: max_x = outW - 1
            if max_y >= outH: max_y = outH - 1

            Ht = max_y - min_y + 1
            Wt = max_x - min_x + 1
            if Ht <= 0 or Wt <= 0:
                continue

            weights = np.zeros((Ht, Wt), dtype=np.float32)
            sum_w, cnt = _drizzle_kernel_weights(kernel_code, Xo, Yo,
                                                 min_x, max_x, min_y, max_y,
                                                 sigma_out, weights)
            if cnt == 0 or sum_w <= 1e-12:
                ox = int(Xo); oy = int(Yo)
                if 0 <= ox < outW and 0 <= oy < outH:
                    for c in range(C):
                        val = img_data[y, x, c]
                        if val != 0.0:
                            drizzle_buffer[oy, ox, c] += val * frame_weight
                            coverage_buffer[oy, ox, c] += frame_weight
                continue

            inv_sum = 1.0 / sum_w
            for c in range(C):
                val = img_data[y, x, c]
                if val == 0.0:
                    continue
                scale = (val * frame_weight) * inv_sum
                cov_scale = frame_weight * inv_sum
                for j in range(Ht):
                    oy = min_y + j
                    for i in range(Wt):
                        w = weights[j, i]
                        if w > 0.0:
                            ox = min_x + i
                            drizzle_buffer[oy, ox, c] += w * scale
                            coverage_buffer[oy, ox, c] += w * cov_scale

    return drizzle_buffer, coverage_buffer

@njit(parallel=True)
def finalize_drizzle_2d(drizzle_buffer, coverage_buffer, final_out):
    """
    parallel-friendly final step: final_out = drizzle_buffer / coverage_buffer,
    with coverage < 1e-8 => 0
    """
    out_h, out_w = drizzle_buffer.shape
    for y in prange(out_h):
        for x in range(out_w):
            cov = coverage_buffer[y, x]
            if cov < 1e-8:
                final_out[y, x] = 0.0
            else:
                final_out[y, x] = drizzle_buffer[y, x] / cov
    return final_out

@njit(fastmath=True)
def drizzle_deposit_color_naive(
    img_data,         # shape (H,W,C)
    transform,        # shape (2,3)
    drizzle_buffer,   # shape (outH,outW,C)
    coverage_buffer,  # shape (outH,outW,C)
    drizzle_factor: float,
    drop_shrink: float,  # unused here
    frame_weight: float
):
    """
    Naive color deposit:
    Each input pixel is mapped to exactly one output pixel (ignores drop_shrink).
    """
    H, W, channels = img_data.shape
    outH, outW, outC = drizzle_buffer.shape

    # Build 3×3 matrix M
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]
    M[0, 1] = transform[0, 1]
    M[0, 2] = transform[0, 2]
    M[1, 0] = transform[1, 0]
    M[1, 1] = transform[1, 1]
    M[1, 2] = transform[1, 2]
    M[2, 2] = 1.0

    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    for y in range(H):
        for x in range(W):
            # 1) Transform
            in_coords[0] = x
            in_coords[1] = y
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # 2) Upsample
            Xo = int(X * drizzle_factor)
            Yo = int(Y * drizzle_factor)

            # 3) Check bounds
            if 0 <= Xo < outW and 0 <= Yo < outH:
                # 4) For each channel
                for cidx in range(channels):
                    val = img_data[y, x, cidx]
                    if val != 0:
                        drizzle_buffer[Yo, Xo, cidx] += val * frame_weight
                        coverage_buffer[Yo, Xo, cidx] += frame_weight

    return drizzle_buffer, coverage_buffer

@njit(fastmath=True)
def drizzle_deposit_color_footprint(
    img_data,         # shape (H,W,C)
    transform,        # shape (2,3)
    drizzle_buffer,   # shape (outH,outW,C)
    coverage_buffer,  # shape (outH,outW,C)
    drizzle_factor: float,
    drop_shrink: float,
    frame_weight: float
):
    """
    Color version with a bounding-box footprint of width=drop_shrink
    for distributing flux in the output plane.
    """
    H, W, channels = img_data.shape
    outH, outW, outC = drizzle_buffer.shape

    # Build 3×3 matrix
    M = np.zeros((3, 3), dtype=np.float32)
    M[0, 0] = transform[0, 0]
    M[0, 1] = transform[0, 1]
    M[0, 2] = transform[0, 2]
    M[1, 0] = transform[1, 0]
    M[1, 1] = transform[1, 1]
    M[1, 2] = transform[1, 2]
    M[2, 2] = 1.0

    in_coords = np.zeros(3, dtype=np.float32)
    in_coords[2] = 1.0

    footprint_radius = drop_shrink * 0.5

    for y in range(H):
        for x in range(W):
            # Transform once per pixel
            in_coords[0] = x
            in_coords[1] = y
            out_coords = M @ in_coords
            X = out_coords[0]
            Y = out_coords[1]

            # Upsample
            Xo = X * drizzle_factor
            Yo = Y * drizzle_factor

            # bounding box
            min_x = int(np.floor(Xo - footprint_radius))
            max_x = int(np.floor(Xo + footprint_radius))
            min_y = int(np.floor(Yo - footprint_radius))
            max_y = int(np.floor(Yo + footprint_radius))

            if max_x < 0 or min_x >= outW or max_y < 0 or min_y >= outH:
                continue
            if min_x < 0:
                min_x = 0
            if max_x >= outW:
                max_x = outW - 1
            if min_y < 0:
                min_y = 0
            if max_y >= outH:
                max_y = outH - 1

            width_foot = (max_x - min_x + 1)
            height_foot = (max_y - min_y + 1)
            area_pixels = width_foot * height_foot
            if area_pixels <= 0:
                continue

            for cidx in range(channels):
                val = img_data[y, x, cidx]
                if val == 0:
                    continue

                deposit_val = (val * frame_weight) / area_pixels
                coverage_fraction = frame_weight / area_pixels

                for oy in range(min_y, max_y + 1):
                    for ox in range(min_x, max_x + 1):
                        drizzle_buffer[oy, ox, cidx] += deposit_val
                        coverage_buffer[oy, ox, cidx] += coverage_fraction

    return drizzle_buffer, coverage_buffer

@njit
def finalize_drizzle_3d(drizzle_buffer, coverage_buffer, final_out):
    """
    final_out[y,x,c] = drizzle_buffer[y,x,c] / coverage_buffer[y,x,c]
    if coverage < 1e-8 => 0
    """
    outH, outW, channels = drizzle_buffer.shape
    for y in range(outH):
        for x in range(outW):
            for cidx in range(channels):
                cov = coverage_buffer[y, x, cidx]
                if cov < 1e-8:
                    final_out[y, x, cidx] = 0.0
                else:
                    final_out[y, x, cidx] = drizzle_buffer[y, x, cidx] / cov
    return final_out

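# Illustrative end-to-end sketch (editor's addition) for the colour kernel path above:
# a gaussian deposit kernel (kernel_code 2), 2x up-sampling, a single frame, then the
# coverage normalisation. All sizes and kernel parameters are example values.
import numpy as np

rgb_dz = np.random.rand(100, 100, 3).astype(np.float32)
ident_c = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0]], dtype=np.float32)
driz_c = np.zeros((200, 200, 3), dtype=np.float32)
cov_c = np.zeros((200, 200, 3), dtype=np.float32)
drizzle_deposit_color_kernel(rgb_dz, ident_c, driz_c, cov_c,
                             2.0,    # drizzle_factor
                             0.65,   # drop_shrink (pixfrac-like)
                             1.0,    # frame_weight
                             2,      # kernel_code: 0=square, 1=circle, 2=gaussian
                             0.5)    # gaussian sigma in output pixels
final_rgb = finalize_drizzle_3d(driz_c, cov_c, np.empty_like(driz_c))
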
@njit
def piecewise_linear(val, xvals, yvals):
    """
    Performs piecewise linear interpolation:
    Given a scalar 'val', and arrays xvals, yvals (each of length N),
    finds i s.t. xvals[i] <= val < xvals[i+1],
    then returns the linear interpolation between yvals[i], yvals[i+1].
    If val < xvals[0], returns yvals[0].
    If val > xvals[-1], returns yvals[-1].
    """
    if val <= xvals[0]:
        return yvals[0]
    for i in range(len(xvals)-1):
        if val < xvals[i+1]:
            # Perform a linear interpolation in interval [xvals[i], xvals[i+1]]
            dx = xvals[i+1] - xvals[i]
            dy = yvals[i+1] - yvals[i]
            ratio = (val - xvals[i]) / dx
            return yvals[i] + ratio * dy
    return yvals[-1]

@njit(parallel=True, fastmath=True)
def apply_curves_numba(image, xvals, yvals):
    """
    Numba-accelerated routine to apply piecewise linear interpolation
    to each pixel in 'image'.
    - image can be (H,W) or (H,W,3).
    - xvals, yvals are the curve arrays in ascending order.
    Returns the adjusted image as float32.
    """
    if image.ndim == 2:
        H, W = image.shape
        out = np.empty((H, W), dtype=np.float32)
        for y in prange(H):
            for x in range(W):
                val = image[y, x]
                out[y, x] = piecewise_linear(val, xvals, yvals)
        return out
    elif image.ndim == 3:
        H, W, C = image.shape
        out = np.empty((H, W, C), dtype=np.float32)
        for y in prange(H):
            for x in range(W):
                for c in range(C):
                    val = image[y, x, c]
                    out[y, x, c] = piecewise_linear(val, xvals, yvals)
        return out
    else:
        # Unexpected shape
        return image  # Fallback

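# Usage sketch (editor's addition): a gentle midtone-lift curve applied with
# apply_curves_numba. The control points are arbitrary example values.
import numpy as np

curve_x = np.array([0.0, 0.25, 0.5, 1.0], dtype=np.float32)
curve_y = np.array([0.0, 0.35, 0.65, 1.0], dtype=np.float32)
img_for_curve = np.random.rand(256, 256, 3).astype(np.float32)
curved = apply_curves_numba(img_for_curve, curve_x, curve_y)
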
def fast_star_detect(image,
                     blur_size=9,
                     threshold_factor=0.7,
                     min_area=1,
                     max_area=5000):
    """
    Finds star positions via contour detection + ellipse fitting.
    Returns Nx2 array of (x, y) star coordinates in the same coordinate system as 'image'.
    """

    # 1) Convert to grayscale if needed
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # 2) Normalize to 8-bit [0..255]
    img_min, img_max = image.min(), image.max()
    if img_max <= img_min:
        return np.empty((0,2), dtype=np.float32)  # All pixels same => no stars
    image_8u = (255.0 * (image - img_min) / (img_max - img_min)).astype(np.uint8)

    # 3) Blur => subtract => highlight stars
    blurred = cv2.GaussianBlur(image_8u, (blur_size, blur_size), 0)
    subtracted = cv2.absdiff(image_8u, blurred)

    # 4) Otsu's threshold => scaled by threshold_factor
    otsu_thresh, _ = cv2.threshold(subtracted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    final_thresh_val = max(2, int(otsu_thresh * threshold_factor))

    _, thresh = cv2.threshold(subtracted, final_thresh_val, 255, cv2.THRESH_BINARY)

    # 5) (Optional) morphological opening to remove single-pixel noise
    kernel = np.ones((2, 2), np.uint8)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

    # 6) Find contours
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # 7) Filter by area, fit ellipse => use ellipse center as star position
    star_positions = []
    for c in contours:
        area = cv2.contourArea(c)
        if area < min_area or area > max_area:
            continue
        if len(c) < 5:
            # Need >=5 points to fit an ellipse
            continue

        ellipse = cv2.fitEllipse(c)
        (cx, cy), (major_axis, minor_axis), angle = ellipse
        # You could check eccentricity, etc. if you want to filter out weird shapes
        star_positions.append((cx, cy))

    if len(star_positions) == 0:
        return np.empty((0,2), dtype=np.float32)
    else:
        return np.array(star_positions, dtype=np.float32)

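# Usage sketch (editor's addition): run the detector on a synthetic frame containing a
# few gaussian blobs standing in for stars. Everything here is made-up test data.
import numpy as np

rng = np.random.default_rng(0)
synth = rng.normal(0.02, 0.005, (256, 256)).astype(np.float32)
yy, xx = np.mgrid[0:256, 0:256]
for sx, sy in [(60, 80), (180, 40), (130, 200)]:
    synth += (0.8 * np.exp(-((xx - sx) ** 2 + (yy - sy) ** 2) / (2.0 * 2.5 ** 2))).astype(np.float32)

stars = fast_star_detect(synth, threshold_factor=0.7)
# 'stars' is an (N, 2) float32 array of (x, y) centroids; N should be about 3 here.
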
# NOTE: the function below is an identical redefinition of _drizzle_kernel_weights above;
# at import time this later definition is the one that remains bound to the name.
@njit(fastmath=True)
def _drizzle_kernel_weights(kernel_code: int, Xo: float, Yo: float,
                            min_x: int, max_x: int, min_y: int, max_y: int,
                            sigma_out: float,
                            weights_out):  # preallocated 2D view (max_y-min_y+1, max_x-min_x+1)
    """
    Fill `weights_out` with unnormalized kernel weights centered at (Xo,Yo).
    Returns (sum_w, count_used).
    """
    H = max_y - min_y + 1
    W = max_x - min_x + 1
    r2_limit = sigma_out * sigma_out  # for circle, sigma_out := radius

    sum_w = 0.0
    cnt = 0
    for j in range(H):
        oy = min_y + j
        cy = (oy + 0.5) - Yo  # pixel-center distance
        for i in range(W):
            ox = min_x + i
            cx = (ox + 0.5) - Xo
            w = 0.0

            if kernel_code == 0:
                # square = uniform weight in the bounding box
                w = 1.0
            elif kernel_code == 1:
                # circle = uniform weight if inside radius
                if (cx*cx + cy*cy) <= r2_limit:
                    w = 1.0
            else:  # gaussian
                # gaussian centered at (Xo,Yo) with sigma_out
                z = (cx*cx + cy*cy) / (2.0 * sigma_out * sigma_out)
                # drop tiny far-away contributions to keep perf ok
                if z <= 9.0:  # ~3σ
                    w = math.exp(-z)

            weights_out[j, i] = w
            sum_w += w
            if w > 0.0:
                cnt += 1

    return sum_w, cnt
