setiastrosuitepro-1.6.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of setiastrosuitepro might be problematic.
- setiastro/__init__.py +2 -0
- setiastro/data/SASP_data.fits +0 -0
- setiastro/data/catalogs/List_of_Galaxies_with_Distances_Gly.csv +488 -0
- setiastro/data/catalogs/astrobin_filters.csv +890 -0
- setiastro/data/catalogs/astrobin_filters_page1_local.csv +51 -0
- setiastro/data/catalogs/cali2.csv +63 -0
- setiastro/data/catalogs/cali2color.csv +65 -0
- setiastro/data/catalogs/celestial_catalog - original.csv +16471 -0
- setiastro/data/catalogs/celestial_catalog.csv +24031 -0
- setiastro/data/catalogs/detected_stars.csv +24784 -0
- setiastro/data/catalogs/fits_header_data.csv +46 -0
- setiastro/data/catalogs/test.csv +8 -0
- setiastro/data/catalogs/updated_celestial_catalog.csv +16471 -0
- setiastro/images/Astro_Spikes.png +0 -0
- setiastro/images/Background_startup.jpg +0 -0
- setiastro/images/HRDiagram.png +0 -0
- setiastro/images/LExtract.png +0 -0
- setiastro/images/LInsert.png +0 -0
- setiastro/images/Oxygenation-atm-2.svg.png +0 -0
- setiastro/images/RGB080604.png +0 -0
- setiastro/images/abeicon.png +0 -0
- setiastro/images/aberration.png +0 -0
- setiastro/images/acv_icon.png +0 -0
- setiastro/images/andromedatry.png +0 -0
- setiastro/images/andromedatry_satellited.png +0 -0
- setiastro/images/annotated.png +0 -0
- setiastro/images/aperture.png +0 -0
- setiastro/images/astrosuite.ico +0 -0
- setiastro/images/astrosuite.png +0 -0
- setiastro/images/astrosuitepro.icns +0 -0
- setiastro/images/astrosuitepro.ico +0 -0
- setiastro/images/astrosuitepro.png +0 -0
- setiastro/images/background.png +0 -0
- setiastro/images/background2.png +0 -0
- setiastro/images/benchmark.png +0 -0
- setiastro/images/big_moon_stabilizer_timeline.png +0 -0
- setiastro/images/big_moon_stabilizer_timeline_clean.png +0 -0
- setiastro/images/blaster.png +0 -0
- setiastro/images/blink.png +0 -0
- setiastro/images/clahe.png +0 -0
- setiastro/images/collage.png +0 -0
- setiastro/images/colorwheel.png +0 -0
- setiastro/images/contsub.png +0 -0
- setiastro/images/convo.png +0 -0
- setiastro/images/copyslot.png +0 -0
- setiastro/images/cosmic.png +0 -0
- setiastro/images/cosmicsat.png +0 -0
- setiastro/images/crop1.png +0 -0
- setiastro/images/cropicon.png +0 -0
- setiastro/images/curves.png +0 -0
- setiastro/images/cvs.png +0 -0
- setiastro/images/debayer.png +0 -0
- setiastro/images/denoise_cnn_custom.png +0 -0
- setiastro/images/denoise_cnn_graph.png +0 -0
- setiastro/images/disk.png +0 -0
- setiastro/images/dse.png +0 -0
- setiastro/images/exoicon.png +0 -0
- setiastro/images/eye.png +0 -0
- setiastro/images/first_quarter.png +0 -0
- setiastro/images/fliphorizontal.png +0 -0
- setiastro/images/flipvertical.png +0 -0
- setiastro/images/font.png +0 -0
- setiastro/images/freqsep.png +0 -0
- setiastro/images/full_moon.png +0 -0
- setiastro/images/functionbundle.png +0 -0
- setiastro/images/graxpert.png +0 -0
- setiastro/images/green.png +0 -0
- setiastro/images/gridicon.png +0 -0
- setiastro/images/halo.png +0 -0
- setiastro/images/hdr.png +0 -0
- setiastro/images/histogram.png +0 -0
- setiastro/images/hubble.png +0 -0
- setiastro/images/imagecombine.png +0 -0
- setiastro/images/invert.png +0 -0
- setiastro/images/isophote.png +0 -0
- setiastro/images/isophote_demo_figure.png +0 -0
- setiastro/images/isophote_demo_image.png +0 -0
- setiastro/images/isophote_demo_model.png +0 -0
- setiastro/images/isophote_demo_residual.png +0 -0
- setiastro/images/jwstpupil.png +0 -0
- setiastro/images/last_quarter.png +0 -0
- setiastro/images/linearfit.png +0 -0
- setiastro/images/livestacking.png +0 -0
- setiastro/images/mask.png +0 -0
- setiastro/images/maskapply.png +0 -0
- setiastro/images/maskcreate.png +0 -0
- setiastro/images/maskremove.png +0 -0
- setiastro/images/morpho.png +0 -0
- setiastro/images/mosaic.png +0 -0
- setiastro/images/multiscale_decomp.png +0 -0
- setiastro/images/nbtorgb.png +0 -0
- setiastro/images/neutral.png +0 -0
- setiastro/images/new_moon.png +0 -0
- setiastro/images/nuke.png +0 -0
- setiastro/images/openfile.png +0 -0
- setiastro/images/pedestal.png +0 -0
- setiastro/images/pen.png +0 -0
- setiastro/images/pixelmath.png +0 -0
- setiastro/images/platesolve.png +0 -0
- setiastro/images/ppp.png +0 -0
- setiastro/images/pro.png +0 -0
- setiastro/images/project.png +0 -0
- setiastro/images/psf.png +0 -0
- setiastro/images/redo.png +0 -0
- setiastro/images/redoicon.png +0 -0
- setiastro/images/rescale.png +0 -0
- setiastro/images/rgbalign.png +0 -0
- setiastro/images/rgbcombo.png +0 -0
- setiastro/images/rgbextract.png +0 -0
- setiastro/images/rotate180.png +0 -0
- setiastro/images/rotatearbitrary.png +0 -0
- setiastro/images/rotateclockwise.png +0 -0
- setiastro/images/rotatecounterclockwise.png +0 -0
- setiastro/images/satellite.png +0 -0
- setiastro/images/script.png +0 -0
- setiastro/images/selectivecolor.png +0 -0
- setiastro/images/simbad.png +0 -0
- setiastro/images/slot0.png +0 -0
- setiastro/images/slot1.png +0 -0
- setiastro/images/slot2.png +0 -0
- setiastro/images/slot3.png +0 -0
- setiastro/images/slot4.png +0 -0
- setiastro/images/slot5.png +0 -0
- setiastro/images/slot6.png +0 -0
- setiastro/images/slot7.png +0 -0
- setiastro/images/slot8.png +0 -0
- setiastro/images/slot9.png +0 -0
- setiastro/images/spcc.png +0 -0
- setiastro/images/spin_precession_vs_lunar_distance.png +0 -0
- setiastro/images/spinner.gif +0 -0
- setiastro/images/stacking.png +0 -0
- setiastro/images/staradd.png +0 -0
- setiastro/images/staralign.png +0 -0
- setiastro/images/starnet.png +0 -0
- setiastro/images/starregistration.png +0 -0
- setiastro/images/starspike.png +0 -0
- setiastro/images/starstretch.png +0 -0
- setiastro/images/statstretch.png +0 -0
- setiastro/images/supernova.png +0 -0
- setiastro/images/uhs.png +0 -0
- setiastro/images/undoicon.png +0 -0
- setiastro/images/upscale.png +0 -0
- setiastro/images/viewbundle.png +0 -0
- setiastro/images/waning_crescent_1.png +0 -0
- setiastro/images/waning_crescent_2.png +0 -0
- setiastro/images/waning_crescent_3.png +0 -0
- setiastro/images/waning_crescent_4.png +0 -0
- setiastro/images/waning_crescent_5.png +0 -0
- setiastro/images/waning_gibbous_1.png +0 -0
- setiastro/images/waning_gibbous_2.png +0 -0
- setiastro/images/waning_gibbous_3.png +0 -0
- setiastro/images/waning_gibbous_4.png +0 -0
- setiastro/images/waning_gibbous_5.png +0 -0
- setiastro/images/waxing_crescent_1.png +0 -0
- setiastro/images/waxing_crescent_2.png +0 -0
- setiastro/images/waxing_crescent_3.png +0 -0
- setiastro/images/waxing_crescent_4.png +0 -0
- setiastro/images/waxing_crescent_5.png +0 -0
- setiastro/images/waxing_gibbous_1.png +0 -0
- setiastro/images/waxing_gibbous_2.png +0 -0
- setiastro/images/waxing_gibbous_3.png +0 -0
- setiastro/images/waxing_gibbous_4.png +0 -0
- setiastro/images/waxing_gibbous_5.png +0 -0
- setiastro/images/whitebalance.png +0 -0
- setiastro/images/wimi_icon_256x256.png +0 -0
- setiastro/images/wimilogo.png +0 -0
- setiastro/images/wims.png +0 -0
- setiastro/images/wrench_icon.png +0 -0
- setiastro/images/xisfliberator.png +0 -0
- setiastro/qml/ResourceMonitor.qml +128 -0
- setiastro/saspro/__init__.py +20 -0
- setiastro/saspro/__main__.py +964 -0
- setiastro/saspro/_generated/__init__.py +7 -0
- setiastro/saspro/_generated/build_info.py +3 -0
- setiastro/saspro/abe.py +1379 -0
- setiastro/saspro/abe_preset.py +196 -0
- setiastro/saspro/aberration_ai.py +910 -0
- setiastro/saspro/aberration_ai_preset.py +224 -0
- setiastro/saspro/accel_installer.py +218 -0
- setiastro/saspro/accel_workers.py +30 -0
- setiastro/saspro/acv_exporter.py +379 -0
- setiastro/saspro/add_stars.py +627 -0
- setiastro/saspro/astrobin_exporter.py +1010 -0
- setiastro/saspro/astrospike.py +153 -0
- setiastro/saspro/astrospike_python.py +1841 -0
- setiastro/saspro/autostretch.py +198 -0
- setiastro/saspro/backgroundneutral.py +639 -0
- setiastro/saspro/batch_convert.py +328 -0
- setiastro/saspro/batch_renamer.py +522 -0
- setiastro/saspro/blemish_blaster.py +494 -0
- setiastro/saspro/blink_comparator_pro.py +3149 -0
- setiastro/saspro/bundles.py +61 -0
- setiastro/saspro/bundles_dock.py +114 -0
- setiastro/saspro/cheat_sheet.py +213 -0
- setiastro/saspro/clahe.py +371 -0
- setiastro/saspro/comet_stacking.py +1442 -0
- setiastro/saspro/common_tr.py +107 -0
- setiastro/saspro/config.py +38 -0
- setiastro/saspro/config_bootstrap.py +40 -0
- setiastro/saspro/config_manager.py +316 -0
- setiastro/saspro/continuum_subtract.py +1620 -0
- setiastro/saspro/convo.py +1403 -0
- setiastro/saspro/convo_preset.py +414 -0
- setiastro/saspro/copyastro.py +190 -0
- setiastro/saspro/cosmicclarity.py +1593 -0
- setiastro/saspro/cosmicclarity_preset.py +407 -0
- setiastro/saspro/crop_dialog_pro.py +1005 -0
- setiastro/saspro/crop_preset.py +189 -0
- setiastro/saspro/curve_editor_pro.py +2608 -0
- setiastro/saspro/curves_preset.py +375 -0
- setiastro/saspro/debayer.py +673 -0
- setiastro/saspro/debug_utils.py +29 -0
- setiastro/saspro/dnd_mime.py +35 -0
- setiastro/saspro/doc_manager.py +2727 -0
- setiastro/saspro/exoplanet_detector.py +2258 -0
- setiastro/saspro/file_utils.py +284 -0
- setiastro/saspro/fitsmodifier.py +748 -0
- setiastro/saspro/fix_bom.py +32 -0
- setiastro/saspro/free_torch_memory.py +48 -0
- setiastro/saspro/frequency_separation.py +1352 -0
- setiastro/saspro/function_bundle.py +1596 -0
- setiastro/saspro/generate_translations.py +3092 -0
- setiastro/saspro/ghs_dialog_pro.py +728 -0
- setiastro/saspro/ghs_preset.py +284 -0
- setiastro/saspro/graxpert.py +638 -0
- setiastro/saspro/graxpert_preset.py +287 -0
- setiastro/saspro/gui/__init__.py +0 -0
- setiastro/saspro/gui/main_window.py +8928 -0
- setiastro/saspro/gui/mixins/__init__.py +33 -0
- setiastro/saspro/gui/mixins/dock_mixin.py +375 -0
- setiastro/saspro/gui/mixins/file_mixin.py +450 -0
- setiastro/saspro/gui/mixins/geometry_mixin.py +503 -0
- setiastro/saspro/gui/mixins/header_mixin.py +441 -0
- setiastro/saspro/gui/mixins/mask_mixin.py +421 -0
- setiastro/saspro/gui/mixins/menu_mixin.py +391 -0
- setiastro/saspro/gui/mixins/theme_mixin.py +367 -0
- setiastro/saspro/gui/mixins/toolbar_mixin.py +1824 -0
- setiastro/saspro/gui/mixins/update_mixin.py +323 -0
- setiastro/saspro/gui/mixins/view_mixin.py +477 -0
- setiastro/saspro/gui/statistics_dialog.py +47 -0
- setiastro/saspro/halobgon.py +492 -0
- setiastro/saspro/header_viewer.py +448 -0
- setiastro/saspro/headless_utils.py +88 -0
- setiastro/saspro/histogram.py +760 -0
- setiastro/saspro/history_explorer.py +941 -0
- setiastro/saspro/i18n.py +168 -0
- setiastro/saspro/image_combine.py +421 -0
- setiastro/saspro/image_peeker_pro.py +1608 -0
- setiastro/saspro/imageops/__init__.py +37 -0
- setiastro/saspro/imageops/mdi_snap.py +292 -0
- setiastro/saspro/imageops/scnr.py +36 -0
- setiastro/saspro/imageops/starbasedwhitebalance.py +210 -0
- setiastro/saspro/imageops/stretch.py +236 -0
- setiastro/saspro/isophote.py +1186 -0
- setiastro/saspro/layers.py +208 -0
- setiastro/saspro/layers_dock.py +714 -0
- setiastro/saspro/lazy_imports.py +193 -0
- setiastro/saspro/legacy/__init__.py +2 -0
- setiastro/saspro/legacy/image_manager.py +2360 -0
- setiastro/saspro/legacy/numba_utils.py +3676 -0
- setiastro/saspro/legacy/xisf.py +1213 -0
- setiastro/saspro/linear_fit.py +537 -0
- setiastro/saspro/live_stacking.py +1854 -0
- setiastro/saspro/log_bus.py +5 -0
- setiastro/saspro/logging_config.py +460 -0
- setiastro/saspro/luminancerecombine.py +510 -0
- setiastro/saspro/main_helpers.py +201 -0
- setiastro/saspro/mask_creation.py +1090 -0
- setiastro/saspro/masks_core.py +56 -0
- setiastro/saspro/mdi_widgets.py +353 -0
- setiastro/saspro/memory_utils.py +666 -0
- setiastro/saspro/metadata_patcher.py +75 -0
- setiastro/saspro/mfdeconv.py +3909 -0
- setiastro/saspro/mfdeconv_earlystop.py +71 -0
- setiastro/saspro/mfdeconvcudnn.py +3312 -0
- setiastro/saspro/mfdeconvsport.py +2459 -0
- setiastro/saspro/minorbodycatalog.py +567 -0
- setiastro/saspro/morphology.py +411 -0
- setiastro/saspro/multiscale_decomp.py +1751 -0
- setiastro/saspro/nbtorgb_stars.py +541 -0
- setiastro/saspro/numba_utils.py +3145 -0
- setiastro/saspro/numba_warmup.py +141 -0
- setiastro/saspro/ops/__init__.py +9 -0
- setiastro/saspro/ops/command_help_dialog.py +623 -0
- setiastro/saspro/ops/command_runner.py +217 -0
- setiastro/saspro/ops/commands.py +1594 -0
- setiastro/saspro/ops/script_editor.py +1105 -0
- setiastro/saspro/ops/scripts.py +1476 -0
- setiastro/saspro/ops/settings.py +637 -0
- setiastro/saspro/parallel_utils.py +554 -0
- setiastro/saspro/pedestal.py +121 -0
- setiastro/saspro/perfect_palette_picker.py +1105 -0
- setiastro/saspro/pipeline.py +110 -0
- setiastro/saspro/pixelmath.py +1604 -0
- setiastro/saspro/plate_solver.py +2480 -0
- setiastro/saspro/project_io.py +797 -0
- setiastro/saspro/psf_utils.py +136 -0
- setiastro/saspro/psf_viewer.py +631 -0
- setiastro/saspro/pyi_rthook_astroquery.py +95 -0
- setiastro/saspro/remove_green.py +331 -0
- setiastro/saspro/remove_stars.py +1599 -0
- setiastro/saspro/remove_stars_preset.py +446 -0
- setiastro/saspro/resources.py +570 -0
- setiastro/saspro/rgb_combination.py +208 -0
- setiastro/saspro/rgb_extract.py +19 -0
- setiastro/saspro/rgbalign.py +727 -0
- setiastro/saspro/runtime_imports.py +7 -0
- setiastro/saspro/runtime_torch.py +754 -0
- setiastro/saspro/save_options.py +73 -0
- setiastro/saspro/selective_color.py +1614 -0
- setiastro/saspro/sfcc.py +1530 -0
- setiastro/saspro/shortcuts.py +3125 -0
- setiastro/saspro/signature_insert.py +1106 -0
- setiastro/saspro/stacking_suite.py +19069 -0
- setiastro/saspro/star_alignment.py +7383 -0
- setiastro/saspro/star_alignment_preset.py +329 -0
- setiastro/saspro/star_metrics.py +49 -0
- setiastro/saspro/star_spikes.py +769 -0
- setiastro/saspro/star_stretch.py +542 -0
- setiastro/saspro/stat_stretch.py +554 -0
- setiastro/saspro/status_log_dock.py +78 -0
- setiastro/saspro/subwindow.py +3523 -0
- setiastro/saspro/supernovaasteroidhunter.py +1719 -0
- setiastro/saspro/swap_manager.py +134 -0
- setiastro/saspro/torch_backend.py +89 -0
- setiastro/saspro/torch_rejection.py +434 -0
- setiastro/saspro/translations/all_source_strings.json +4726 -0
- setiastro/saspro/translations/ar_translations.py +4096 -0
- setiastro/saspro/translations/de_translations.py +3728 -0
- setiastro/saspro/translations/es_translations.py +4169 -0
- setiastro/saspro/translations/fr_translations.py +4090 -0
- setiastro/saspro/translations/hi_translations.py +3803 -0
- setiastro/saspro/translations/integrate_translations.py +271 -0
- setiastro/saspro/translations/it_translations.py +4728 -0
- setiastro/saspro/translations/ja_translations.py +3834 -0
- setiastro/saspro/translations/pt_translations.py +3847 -0
- setiastro/saspro/translations/ru_translations.py +3082 -0
- setiastro/saspro/translations/saspro_ar.qm +0 -0
- setiastro/saspro/translations/saspro_ar.ts +16019 -0
- setiastro/saspro/translations/saspro_de.qm +0 -0
- setiastro/saspro/translations/saspro_de.ts +14548 -0
- setiastro/saspro/translations/saspro_es.qm +0 -0
- setiastro/saspro/translations/saspro_es.ts +16202 -0
- setiastro/saspro/translations/saspro_fr.qm +0 -0
- setiastro/saspro/translations/saspro_fr.ts +15870 -0
- setiastro/saspro/translations/saspro_hi.qm +0 -0
- setiastro/saspro/translations/saspro_hi.ts +14855 -0
- setiastro/saspro/translations/saspro_it.qm +0 -0
- setiastro/saspro/translations/saspro_it.ts +19046 -0
- setiastro/saspro/translations/saspro_ja.qm +0 -0
- setiastro/saspro/translations/saspro_ja.ts +14980 -0
- setiastro/saspro/translations/saspro_pt.qm +0 -0
- setiastro/saspro/translations/saspro_pt.ts +15024 -0
- setiastro/saspro/translations/saspro_ru.qm +0 -0
- setiastro/saspro/translations/saspro_ru.ts +11835 -0
- setiastro/saspro/translations/saspro_sw.qm +0 -0
- setiastro/saspro/translations/saspro_sw.ts +15237 -0
- setiastro/saspro/translations/saspro_uk.qm +0 -0
- setiastro/saspro/translations/saspro_uk.ts +15248 -0
- setiastro/saspro/translations/saspro_zh.qm +0 -0
- setiastro/saspro/translations/saspro_zh.ts +15289 -0
- setiastro/saspro/translations/sw_translations.py +3897 -0
- setiastro/saspro/translations/uk_translations.py +3929 -0
- setiastro/saspro/translations/zh_translations.py +3910 -0
- setiastro/saspro/versioning.py +77 -0
- setiastro/saspro/view_bundle.py +1558 -0
- setiastro/saspro/wavescale_hdr.py +648 -0
- setiastro/saspro/wavescale_hdr_preset.py +101 -0
- setiastro/saspro/wavescalede.py +683 -0
- setiastro/saspro/wavescalede_preset.py +230 -0
- setiastro/saspro/wcs_update.py +374 -0
- setiastro/saspro/whitebalance.py +540 -0
- setiastro/saspro/widgets/__init__.py +48 -0
- setiastro/saspro/widgets/common_utilities.py +306 -0
- setiastro/saspro/widgets/graphics_views.py +122 -0
- setiastro/saspro/widgets/image_utils.py +518 -0
- setiastro/saspro/widgets/minigame/game.js +991 -0
- setiastro/saspro/widgets/minigame/index.html +53 -0
- setiastro/saspro/widgets/minigame/style.css +241 -0
- setiastro/saspro/widgets/preview_dialogs.py +280 -0
- setiastro/saspro/widgets/resource_monitor.py +313 -0
- setiastro/saspro/widgets/spinboxes.py +290 -0
- setiastro/saspro/widgets/themed_buttons.py +13 -0
- setiastro/saspro/widgets/wavelet_utils.py +331 -0
- setiastro/saspro/wimi.py +7367 -0
- setiastro/saspro/wims.py +588 -0
- setiastro/saspro/window_shelf.py +185 -0
- setiastro/saspro/xisf.py +1213 -0
- setiastrosuitepro-1.6.7.dist-info/METADATA +279 -0
- setiastrosuitepro-1.6.7.dist-info/RECORD +394 -0
- setiastrosuitepro-1.6.7.dist-info/WHEEL +4 -0
- setiastrosuitepro-1.6.7.dist-info/entry_points.txt +6 -0
- setiastrosuitepro-1.6.7.dist-info/licenses/LICENSE +674 -0
- setiastrosuitepro-1.6.7.dist-info/licenses/license.txt +2580 -0
@@ -0,0 +1,2459 @@
# pro/mfdeconvsport.py
from __future__ import annotations
import os, sys
import math
import re
import numpy as np
from astropy.io import fits
from PyQt6.QtCore import QObject, pyqtSignal
from setiastro.saspro.psf_utils import compute_psf_kernel_for_image
from PyQt6.QtWidgets import QApplication
from PyQt6.QtCore import QThread
from threadpoolctl import threadpool_limits
from concurrent.futures import ThreadPoolExecutor, as_completed, ProcessPoolExecutor
_USE_PROCESS_POOL_FOR_ASSETS = not getattr(sys, "frozen", False)
from setiastro.saspro.mfdeconv_earlystop import EarlyStopper

import contextlib
try:
    import sep
except Exception:
    sep = None
from setiastro.saspro.free_torch_memory import _free_torch_memory
torch = None  # filled by runtime loader if available
TORCH_OK = False
NO_GRAD = contextlib.nullcontext  # fallback

_XISF_READERS = []
try:
    # e.g. your legacy module
    from setiastro.saspro.legacy import xisf as _legacy_xisf
    if hasattr(_legacy_xisf, "read"):
        _XISF_READERS.append(lambda p: _legacy_xisf.read(p))
    elif hasattr(_legacy_xisf, "open"):
        _XISF_READERS.append(lambda p: _legacy_xisf.open(p)[0])
except Exception:
    pass
try:
    # sometimes projects expose a generic load_image
    from setiastro.saspro.legacy.image_manager import load_image as _generic_load_image  # adjust if needed
    _XISF_READERS.append(lambda p: _generic_load_image(p)[0])
except Exception:
    pass

# at top of file with the other imports
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import SimpleQueue
from setiastro.saspro.memory_utils import LRUDict

# ── XISF decode cache → memmap on disk ─────────────────────────────────
import tempfile
import threading
import uuid
import atexit
_XISF_CACHE = LRUDict(50)
_XISF_LOCK = threading.Lock()
_XISF_TMPFILES = []

from collections import OrderedDict

# ─────────────────────────────────────────────────────────────────────────────
# Unified image I/O for MFDeconv (FITS + XISF)
# ─────────────────────────────────────────────────────────────────────────────
import os
import numpy as np
from astropy.io import fits

from pathlib import Path


from collections import OrderedDict

# ── CHW LRU (float32) built on top of FITS memmap & XISF memmap ────────────────
class _FrameCHWLRU:
    def __init__(self, capacity=8):
        self.cap = int(max(1, capacity))
        self.od = OrderedDict()

    def clear(self):
        self.od.clear()

    def get(self, path, Ht, Wt, color_mode):
        key = (path, Ht, Wt, str(color_mode).lower())
        hit = self.od.get(key)
        if hit is not None:
            self.od.move_to_end(key)
            return hit

        # Load backing array cheaply (memmap for FITS, cached memmap for XISF)
        ext = os.path.splitext(path)[1].lower()
        if ext == ".xisf":
            a = _xisf_cached_array(path)  # float32, HW/HWC/CHW
        else:
            # FITS path: use astropy memmap (no data copy)
            with fits.open(path, memmap=True, ignore_missing_simple=True) as hdul:
                arr = None
                for h in hdul:
                    if getattr(h, "data", None) is not None:
                        arr = h.data
                        break
                if arr is None:
                    raise ValueError(f"No image data in {path}")
                a = np.asarray(arr)
        # dtype normalize once; keep float32
        if a.dtype.kind in "ui":
            a = a.astype(np.float32) / (float(np.iinfo(a.dtype).max) or 1.0)
        else:
            a = a.astype(np.float32, copy=False)

        # Center-crop to (Ht, Wt) and convert to CHW
        a = np.asarray(a)  # float32
        a = _center_crop(a, Ht, Wt)

        # Respect color_mode: “luma” → 1×H×W, “PerChannel” → 3×H×W if RGB present
        cm = str(color_mode).lower()
        if cm == "luma":
            a_chw = _as_chw(_to_luma_local(a)).astype(np.float32, copy=False)
        else:
            a_chw = _as_chw(a).astype(np.float32, copy=False)
        if a_chw.shape[0] == 1 and cm != "luma":
            # still OK (mono data)
            pass

        # LRU insert
        self.od[key] = a_chw
        if len(self.od) > self.cap:
            self.od.popitem(last=False)
        return a_chw

_FRAME_LRU = _FrameCHWLRU(capacity=8)  # tune if you like
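# Usage sketch for the cache above (path and sizes are hypothetical examples):
#   chw = _FRAME_LRU.get("/data/frame_0001.fits", Ht=1024, Wt=1024, color_mode="luma")
#   # -> (1, 1024, 1024) float32, center-cropped, integer data scaled to [0, 1];
#   # a repeat call with the same key returns the cached array without re-decoding.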

def _clear_all_caches():
    try: _clear_xisf_cache()
    except Exception as e:
        import logging
        logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
    try: _FRAME_LRU.clear()
    except Exception as e:
        import logging
        logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")

def _as_chw(np_img: np.ndarray) -> np.ndarray:
    x = np.asarray(np_img, dtype=np.float32, order="C")
    if x.size == 0:
        raise RuntimeError(f"Empty image array after load; raw shape={np_img.shape}")
    if x.ndim == 2:
        return x[None, ...]  # 1,H,W
    if x.ndim == 3 and x.shape[0] in (1, 3):
        if x.shape[0] == 0:
            raise RuntimeError(f"Zero channels in CHW array; shape={x.shape}")
        return x
    if x.ndim == 3 and x.shape[-1] in (1, 3):
        if x.shape[-1] == 0:
            raise RuntimeError(f"Zero channels in HWC array; shape={x.shape}")
        return np.moveaxis(x, -1, 0)
    # last resort: treat first dim as channels, but reject zero
    if x.shape[0] == 0:
        raise RuntimeError(f"Zero channels in array; shape={x.shape}")
    return x

def _normalize_to_float32(a: np.ndarray) -> np.ndarray:
    if a.dtype.kind in "ui":
        return (a.astype(np.float32) / (float(np.iinfo(a.dtype).max) or 1.0))
    if a.dtype == np.float32:
        return a
    return a.astype(np.float32, copy=False)

def _xisf_cached_array(path: str) -> np.memmap:
    """
    Decode an XISF image exactly once and back it by a read-only float32 memmap.
    Returns a memmap that can be sliced cheaply for tiles.
    """
    with _XISF_LOCK:
        hit = _XISF_CACHE.get(path)
        if hit is not None:
            fn, shape = hit
            return np.memmap(fn, dtype=np.float32, mode="r", shape=shape)

        # Decode once
        arr, _ = _load_image_array(path)  # your existing loader
        if arr is None:
            raise ValueError(f"XISF loader returned None for {path}")
        arr = np.asarray(arr)
        arrf = _normalize_to_float32(arr)

        # Create a temp file-backed memmap
        tmpdir = tempfile.gettempdir()
        fn = os.path.join(tmpdir, f"xisf_cache_{uuid.uuid4().hex}.mmap")
        mm = np.memmap(fn, dtype=np.float32, mode="w+", shape=arrf.shape)
        mm[...] = arrf[...]
        mm.flush()
        del mm  # close writer handle; re-open below as read-only

        _XISF_CACHE[path] = (fn, arrf.shape)
        _XISF_TMPFILES.append(fn)
        return np.memmap(fn, dtype=np.float32, mode="r", shape=arrf.shape)

def _clear_xisf_cache():
    with _XISF_LOCK:
        for fn in _XISF_TMPFILES:
            try: os.remove(fn)
            except Exception as e:
                import logging
                logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
        _XISF_CACHE.clear()
        _XISF_TMPFILES.clear()

atexit.register(_clear_xisf_cache)


def _is_xisf(path: str) -> bool:
    return os.path.splitext(path)[1].lower() == ".xisf"

def _read_xisf_numpy(path: str) -> np.ndarray:
    if not _XISF_READERS:
        raise RuntimeError(
            "No XISF readers registered. Ensure one of "
            "legacy.xisf.read/open or *.image_io.load_image is importable."
        )
    last_err = None
    for fn in _XISF_READERS:
        try:
            arr = fn(path)
            if isinstance(arr, tuple):
                arr = arr[0]
            return np.asarray(arr)
        except Exception as e:
            last_err = e
    raise RuntimeError(f"All XISF readers failed for {path}: {last_err}")

def _fits_open_data(path: str):
    # ignore_missing_simple=True lets us open headers missing SIMPLE
    with fits.open(path, memmap=True, ignore_missing_simple=True) as hdul:
        hdu = hdul[0]
        if hdu.data is None:
            # find first image HDU if primary is header-only
            for h in hdul[1:]:
                if getattr(h, "data", None) is not None:
                    hdu = h
                    break
        data = np.asanyarray(hdu.data)
        hdr = hdu.header
    return data, hdr

def _load_image_array(path: str) -> tuple[np.ndarray, "fits.Header | None"]:
    """
    Return (numpy array, fits.Header or None). Color-last if 3D.
    dtype left as-is; callers cast to float32. Array is C-contig & writeable.
    """
    if _is_xisf(path):
        arr = _read_xisf_numpy(path)
        hdr = None
    else:
        arr, hdr = _fits_open_data(path)

    a = np.asarray(arr)
    # Move color axis to last if 3D with a leading channel axis
    if a.ndim == 3 and a.shape[0] in (1, 3) and a.shape[-1] not in (1, 3):
        a = np.moveaxis(a, 0, -1)
    # Ensure contiguous, writeable float32 decisions happen later; here we just ensure writeable
    if (not a.flags.c_contiguous) or (not a.flags.writeable):
        a = np.array(a, copy=True)
    return a, hdr

def _probe_hw(path: str) -> tuple[int, int, int | None]:
    """
    Returns (H, W, C_or_None) without changing data. Moves color to last if needed.
    """
    a, _ = _load_image_array(path)
    if a.ndim == 2:
        return a.shape[0], a.shape[1], None
    if a.ndim == 3:
        h, w, c = a.shape
        # treat mono-3D as (H,W,1)
        if c not in (1, 3) and a.shape[0] in (1, 3):
            a = np.moveaxis(a, 0, -1)
            h, w, c = a.shape
        return h, w, c if c in (1, 3) else None
    raise ValueError(f"Unsupported ndim={a.ndim} for {path}")

def _common_hw_from_paths(paths: list[str]) -> tuple[int, int]:
    Hs, Ws = [], []
    for p in paths:
        h, w, _ = _probe_hw(p)
        h = int(h); w = int(w)
        if h > 0 and w > 0:
            Hs.append(h); Ws.append(w)

    if not Hs:
        raise ValueError("Could not determine any valid frame sizes.")
    Ht = min(Hs); Wt = min(Ws)
    if Ht < 8 or Wt < 8:
        raise ValueError(f"Intersection too small: {Ht}x{Wt}")
    return Ht, Wt


def _to_chw_float32(img: np.ndarray, color_mode: str) -> np.ndarray:
    """
    Convert to CHW float32:
      - mono → (1,H,W)
      - RGB  → (3,H,W) if 'PerChannel'; (1,H,W) if 'luma'
    """
    x = np.asarray(img)
    if x.ndim == 2:
        y = x.astype(np.float32, copy=False)[None, ...]  # (1,H,W)
        return y
    if x.ndim == 3:
        # color-last (H,W,C) expected
        if x.shape[-1] == 1:
            return x[..., 0].astype(np.float32, copy=False)[None, ...]
        if x.shape[-1] == 3:
            if str(color_mode).lower() in ("perchannel", "per_channel", "perchannelrgb"):
                r, g, b = x[..., 0], x[..., 1], x[..., 2]
                return np.stack([r.astype(np.float32, copy=False),
                                 g.astype(np.float32, copy=False),
                                 b.astype(np.float32, copy=False)], axis=0)
            # luma
            r, g, b = x[..., 0].astype(np.float32, copy=False), x[..., 1].astype(np.float32, copy=False), x[..., 2].astype(np.float32, copy=False)
            L = 0.2126*r + 0.7152*g + 0.0722*b
            return L[None, ...]
        # rare mono-3D
        if x.shape[0] in (1, 3) and x.shape[-1] not in (1, 3):
            x = np.moveaxis(x, 0, -1)
            return _to_chw_float32(x, color_mode)
    raise ValueError(f"Unsupported image shape {x.shape}")

def _center_crop_hw(img: np.ndarray, Ht: int, Wt: int) -> np.ndarray:
    h, w = img.shape[:2]
    y0 = max(0, (h - Ht)//2); x0 = max(0, (w - Wt)//2)
    return img[y0:y0+Ht, x0:x0+Wt, ...].copy() if (Ht < h or Wt < w) else img

def _stack_loader_memmap(paths: list[str], Ht: int, Wt: int, color_mode: str):
    """
    Drop-in replacement of the old FITS-only helper.
    Returns (ys, hdrs):
      ys   : list of CHW float32 arrays cropped to (Ht,Wt)
      hdrs : list of fits.Header or None (XISF)
    """
    ys, hdrs = [], []
    for p in paths:
        arr, hdr = _load_image_array(p)
        arr = _center_crop_hw(arr, Ht, Wt)
        # normalize integer data to [0,1] like the rest of your code
        if arr.dtype.kind in "ui":
            mx = np.float32(np.iinfo(arr.dtype).max)
            arr = arr.astype(np.float32, copy=False) / (mx if mx > 0 else 1.0)
        elif arr.dtype.kind == "f":
            arr = arr.astype(np.float32, copy=False)
        else:
            arr = arr.astype(np.float32, copy=False)

        y = _to_chw_float32(arr, color_mode)
        if (not y.flags.c_contiguous) or (not y.flags.writeable):
            y = np.ascontiguousarray(y.astype(np.float32, copy=True))
        ys.append(y)
        hdrs.append(hdr if isinstance(hdr, fits.Header) else None)
    return ys, hdrs

def _safe_primary_header(path: str) -> fits.Header:
    if _is_xisf(path):
        # best-effort synthetic header
        h = fits.Header()
        h["SIMPLE"] = (True, "created by MFDeconv")
        h["BITPIX"] = -32
        h["NAXIS"] = 2
        return h
    try:
        return fits.getheader(path, ext=0, ignore_missing_simple=True)
    except Exception:
        return fits.Header()

def _compute_frame_assets(i, arr, hdr, *, make_masks, make_varmaps,
                          star_mask_cfg, varmap_cfg, status_sink=lambda s: None):
    """
    Worker function: compute PSF and optional star mask / varmap for one frame.
    Returns (index, psf, mask_or_None, var_or_None, log_lines)
    """
    logs = []
    def log(s): logs.append(s)

    # --- PSF sizing by FWHM ---
    f_hdr = _estimate_fwhm_from_header(hdr)
    f_img = _estimate_fwhm_from_image(arr)
    f_whm = f_hdr if (np.isfinite(f_hdr)) else f_img
    if not np.isfinite(f_whm) or f_whm <= 0:
        f_whm = 2.5
    k_auto = _auto_ksize_from_fwhm(f_whm)

    # --- Star-derived PSF with retries (dynamic det_sigma ladder) ---
    psf = None

    # Your existing ksize ladder
    k_ladder = [k_auto, max(k_auto - 4, 11), 21, 17, 15, 13, 11]

    # New: start high to avoid detecting 10k stars; step down only if needed
    sigma_ladder = [50.0, 25.0, 12.0, 6.0]

    tried = set()
    for det_sigma in sigma_ladder:
        for k_try in k_ladder:
            if (det_sigma, k_try) in tried:
                continue
            tried.add((det_sigma, k_try))
            try:
                out = compute_psf_kernel_for_image(arr, ksize=k_try, det_sigma=det_sigma, max_stars=80)
                psf_try = out[0] if (isinstance(out, tuple) and len(out) >= 1) else out
                if psf_try is not None:
                    psf = psf_try
                    break
            except Exception:
                psf = None
        if psf is not None:
            break

    if psf is None:
        psf = _gaussian_psf(f_whm, ksize=k_auto)

    psf = _soften_psf(_normalize_psf(psf.astype(np.float32, copy=False)), sigma_px=0.25)

    mask = None
    var = None

    if make_masks or make_varmaps:
        # one background per frame (reused by both)
        luma = _to_luma_local(arr)
        vmc = (varmap_cfg or {})
        sky_map, rms_map, err_scalar = _sep_background_precompute(
            luma, bw=int(vmc.get("bw", 64)), bh=int(vmc.get("bh", 64))
        )

        if make_masks:
            smc = star_mask_cfg or {}
            mask = _star_mask_from_precomputed(
                luma, sky_map, err_scalar,
                thresh_sigma = smc.get("thresh_sigma", THRESHOLD_SIGMA),
                max_objs     = smc.get("max_objs", STAR_MASK_MAXOBJS),
                grow_px      = smc.get("grow_px", GROW_PX),
                ellipse_scale= smc.get("ellipse_scale", ELLIPSE_SCALE),
                soft_sigma   = smc.get("soft_sigma", SOFT_SIGMA),
                max_radius_px= smc.get("max_radius_px", MAX_STAR_RADIUS),
                keep_floor   = smc.get("keep_floor", KEEP_FLOOR),
                max_side     = smc.get("max_side", STAR_MASK_MAXSIDE),
                status_cb    = log,
            )

        if make_varmaps:
            vmc = varmap_cfg or {}
            var = _variance_map_from_precomputed(
                luma, sky_map, rms_map, hdr,
                smooth_sigma = vmc.get("smooth_sigma", 1.0),
                floor        = vmc.get("floor", 1e-8),
                status_cb    = log,
            )

    # small per-frame summary
    fwhm_est = _psf_fwhm_px(psf)
    logs.insert(0, f"MFDeconv: PSF{i}: ksize={psf.shape[0]} | FWHM≈{fwhm_est:.2f}px")

    return i, psf, mask, var, logs


def _compute_one_worker(args):
    """
    Top-level picklable worker for ProcessPoolExecutor.
    args: (i, path, make_masks_in_worker, make_varmaps, star_mask_cfg, varmap_cfg)
    Returns (i, psf, mask, var, logs)
    """
    (i, path, make_masks_in_worker, make_varmaps, star_mask_cfg, varmap_cfg) = args
    # avoid BLAS/OMP storm inside each process
    with threadpool_limits(limits=1):
        arr, hdr = _load_image_array(path)  # FITS or XISF
        arr = np.asarray(arr, dtype=np.float32, order="C")
        if arr.ndim == 3 and arr.shape[-1] == 1:
            arr = np.squeeze(arr, axis=-1)
        if not isinstance(hdr, fits.Header):  # synthesize FITS-like header for XISF
            hdr = _safe_primary_header(path)
        return _compute_frame_assets(
            i, arr, hdr,
            make_masks=bool(make_masks_in_worker),
            make_varmaps=bool(make_varmaps),
            star_mask_cfg=star_mask_cfg,
            varmap_cfg=varmap_cfg,
        )


def _build_psf_and_assets(
    paths,                     # list[str]
    make_masks=False,
    make_varmaps=False,
    status_cb=lambda s: None,
    save_dir: str | None = None,
    star_mask_cfg: dict | None = None,
    varmap_cfg: dict | None = None,
    max_workers: int | None = None,
    star_mask_ref_path: str | None = None,   # build one mask from this frame if provided
    # NEW (passed from multiframe_deconv so we don’t re-probe/convert):
    Ht: int | None = None,
    Wt: int | None = None,
    color_mode: str = "luma",
):
    """
    Parallel PSF + (optional) star mask + variance map per frame.

    Changes from the original:
      • Reuses the decoded frame cache (_FRAME_LRU) for FITS/XISF so we never re-decode.
      • Automatically switches to threads for XISF (so memmaps are shared across workers).
      • Builds a single reference star mask (if requested) from the cached frame and
        center-pads/crops it for all frames (no extra I/O).
      • Preserves return order and streams worker logs back to the UI.
    """
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    n = len(paths)

    # Resolve target intersection size if caller didn't pass it
    if Ht is None or Wt is None:
        Ht, Wt = _common_hw_from_paths(paths)

    # Sensible default worker count (cap at 8)
    if max_workers is None:
        try:
            hw = os.cpu_count() or 4
        except Exception:
            hw = 4
        max_workers = max(1, min(8, hw))

    # Decide executor: for any XISF, prefer threads so the memmap/cache is shared
    any_xisf = any(os.path.splitext(p)[1].lower() == ".xisf" for p in paths)
    use_proc_pool = (not any_xisf) and _USE_PROCESS_POOL_FOR_ASSETS
    Executor = ProcessPoolExecutor if use_proc_pool else ThreadPoolExecutor
    pool_kind = "process" if use_proc_pool else "thread"
    status_cb(f"MFDeconv: measuring PSFs/masks/varmaps with {max_workers} {pool_kind}s…")

    # ---- helper: pad-or-crop a 2D array to (Ht,Wt), centered ----
    def _center_pad_or_crop_2d(a2d: np.ndarray, Ht: int, Wt: int, fill: float = 1.0) -> np.ndarray:
        a2d = np.asarray(a2d, dtype=np.float32)
        H, W = int(a2d.shape[0]), int(a2d.shape[1])
        # crop first if bigger
        y0 = max(0, (H - Ht) // 2); x0 = max(0, (W - Wt) // 2)
        y1 = min(H, y0 + Ht); x1 = min(W, x0 + Wt)
        cropped = a2d[y0:y1, x0:x1]
        ch, cw = cropped.shape
        if ch == Ht and cw == Wt:
            return np.ascontiguousarray(cropped, dtype=np.float32)
        # pad if smaller
        out = np.full((Ht, Wt), float(fill), dtype=np.float32)
        oy = (Ht - ch) // 2; ox = (Wt - cw) // 2
        out[oy:oy+ch, ox:ox+cw] = cropped
        return out

    # ---- optional: build one mask from the reference frame and reuse ----
    base_ref_mask = None
    if make_masks and star_mask_ref_path:
        try:
            status_cb(f"Star mask: using reference frame for all masks → {os.path.basename(star_mask_ref_path)}")
            # Pull from the shared frame cache as luma on (Ht,Wt)
            ref_chw = _FRAME_LRU.get(star_mask_ref_path, Ht, Wt, "luma")  # (1,H,W) or (H,W)
            L = ref_chw[0] if (ref_chw.ndim == 3) else ref_chw  # 2D float32

            vmc = (varmap_cfg or {})
            sky_map, rms_map, err_scalar = _sep_background_precompute(
                L, bw=int(vmc.get("bw", 64)), bh=int(vmc.get("bh", 64))
            )
            smc = (star_mask_cfg or {})
            base_ref_mask = _star_mask_from_precomputed(
                L, sky_map, err_scalar,
                thresh_sigma = smc.get("thresh_sigma", THRESHOLD_SIGMA),
                max_objs     = smc.get("max_objs", STAR_MASK_MAXOBJS),
                grow_px      = smc.get("grow_px", GROW_PX),
                ellipse_scale= smc.get("ellipse_scale", ELLIPSE_SCALE),
                soft_sigma   = smc.get("soft_sigma", SOFT_SIGMA),
                max_radius_px= smc.get("max_radius_px", MAX_STAR_RADIUS),
                keep_floor   = smc.get("keep_floor", KEEP_FLOOR),
                max_side     = smc.get("max_side", STAR_MASK_MAXSIDE),
                status_cb    = status_cb,
            )
        except Exception as e:
            status_cb(f"⚠️ Star mask (reference) failed: {e}. Falling back to per-frame masks.")
            base_ref_mask = None

    # for GUI safety, queue logs from workers and flush in the main thread
    log_queue: SimpleQueue = SimpleQueue()

    def enqueue_logs(lines):
        for s in lines:
            log_queue.put(s)

    psfs = [None] * n
    masks = ([None] * n) if make_masks else None
    vars_ = ([None] * n) if make_varmaps else None
    make_masks_in_worker = bool(make_masks and (base_ref_mask is None))

    # --- thread worker: get frame from cache and compute assets ---
    def _compute_one(i: int, path: str):
        # avoid heavy BLAS oversubscription inside each worker
        with threadpool_limits(limits=1):
            # Pull frame from cache honoring color_mode & target (Ht,Wt)
            img_chw = _FRAME_LRU.get(path, Ht, Wt, color_mode)  # (C,H,W) float32
            # For PSF/mask/varmap we operate on a 2D plane (luma/mono)
            arr2d = img_chw[0] if (img_chw.ndim == 3) else img_chw  # (H,W) float32

            # Header: synthesize a safe FITS-like header (works for XISF too)
            try:
                hdr = _safe_primary_header(path)
            except Exception:
                hdr = fits.Header()

            return _compute_frame_assets(
                i, arr2d, hdr,
                make_masks=bool(make_masks_in_worker),
                make_varmaps=bool(make_varmaps),
                star_mask_cfg=star_mask_cfg,
                varmap_cfg=varmap_cfg,
            )

    # --- submit jobs ---
    with Executor(max_workers=max_workers) as ex:
        futs = []
        for i, p in enumerate(paths, start=1):
            status_cb(f"MFDeconv: measuring PSF {i}/{n} …")
            if use_proc_pool:
                # Process-safe path: worker re-loads inside the subprocess
                futs.append(ex.submit(
                    _compute_one_worker,
                    (i, p, bool(make_masks_in_worker), bool(make_varmaps), star_mask_cfg, varmap_cfg)
                ))
            else:
                # Thread path: hits the shared cache (fast path for XISF/FITS)
                futs.append(ex.submit(_compute_one, i, p))

        done_cnt = 0
        for fut in as_completed(futs):
            i, psf, m, v, logs = fut.result()
            idx = i - 1
            psfs[idx] = psf
            if masks is not None:
                masks[idx] = m
            if vars_ is not None:
                vars_[idx] = v
            enqueue_logs(logs)

            done_cnt += 1
            if (done_cnt % 4) == 0 or done_cnt == n:
                while not log_queue.empty():
                    try:
                        status_cb(log_queue.get_nowait())
                    except Exception:
                        break

    # If we built a single reference mask, apply it to every frame (center pad/crop)
    if base_ref_mask is not None and masks is not None:
        for idx in range(n):
            masks[idx] = _center_pad_or_crop_2d(base_ref_mask, int(Ht), int(Wt), fill=1.0)

    # final flush of any remaining logs
    while not log_queue.empty():
        try:
            status_cb(log_queue.get_nowait())
        except Exception:
            break

    # save PSFs if requested
    if save_dir:
        for i, k in enumerate(psfs, start=1):
            if k is not None:
                fits.PrimaryHDU(k.astype(np.float32, copy=False)).writeto(
                    os.path.join(save_dir, f"psf_{i:03d}.fit"), overwrite=True
                )

    return psfs, masks, vars_
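# Example call (hypothetical file list; keyword values are illustrative only):
#   psfs, masks, vars_ = _build_psf_and_assets(
#       ["light_0001.fits", "light_0002.xisf"],
#       make_masks=True, make_varmaps=False,
#       status_cb=print, color_mode="luma",
#   )
#   # psfs[i] is a normalized 2-D float32 kernel in frame order; masks follows the
#   # same order, and vars_ is None here because make_varmaps=False.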

_ALLOWED = re.compile(r"[^A-Za-z0-9_-]+")

# known FITS-style multi-extensions (rightmost-first match)
_KNOWN_EXTS = [
    ".fits.fz", ".fit.fz", ".fits.gz", ".fit.gz",
    ".fz", ".gz",
    ".fits", ".fit"
]

def _sanitize_token(s: str) -> str:
    s = _ALLOWED.sub("_", s)
    s = re.sub(r"_+", "_", s).strip("_")
    return s

def _split_known_exts(p: Path) -> tuple[str, str]:
    """
    Return (name_body, full_ext) where full_ext is a REAL extension block
    (e.g. '.fits.fz'). Any junk like '.0s (1310x880)_MFDeconv' stays in body.
    """
    name = p.name
    for ext in _KNOWN_EXTS:
        if name.lower().endswith(ext):
            body = name[:-len(ext)]
            return body, ext
    # fallback: single suffix
    return p.stem, "".join(p.suffixes)

_SIZE_RE = re.compile(r"\(?\s*(\d{2,5})x(\d{2,5})\s*\)?", re.IGNORECASE)
_EXP_RE  = re.compile(r"(?<![A-Za-z0-9])(\d+(?:\.\d+)?)\s*s\b", re.IGNORECASE)
_RX_RE   = re.compile(r"(?<![A-Za-z0-9])(\d+)x\b", re.IGNORECASE)

def _extract_size(body: str) -> str | None:
    m = _SIZE_RE.search(body)
    return f"{m.group(1)}x{m.group(2)}" if m else None

def _extract_exposure_secs(body: str) -> str | None:
    m = _EXP_RE.search(body)
    if not m:
        return None
    secs = int(round(float(m.group(1))))
    return f"{secs}s"

def _strip_metadata_from_base(body: str) -> str:
    s = body

    # normalize common separators first
    s = s.replace(" - ", "_")

    # remove known trailing marker '_MFDeconv'
    s = re.sub(r"(?i)[\s_]+MFDeconv$", "", s)

    # remove parenthetical copy counters e.g. '(1)'
    s = re.sub(r"\(\s*\d+\s*\)$", "", s)

    # remove size (with or without parens) anywhere
    s = _SIZE_RE.sub("", s)

    # remove exposures like '0s', '0.5s', ' 45 s' (even if preceded by a dot)
    s = _EXP_RE.sub("", s)

    # remove any _#x tokens
    s = _RX_RE.sub("", s)

    # collapse whitespace/underscores and sanitize
    s = re.sub(r"[\s]+", "_", s)
    s = _sanitize_token(s)
    return s or "output"

def _canonical_out_name_prefix(base: str, r: int, size: str | None,
                               exposure_secs: str | None, tag: str = "MFDeconv") -> str:
    parts = [_sanitize_token(tag), _sanitize_token(base)]
    if size:
        parts.append(_sanitize_token(size))
    if exposure_secs:
        parts.append(_sanitize_token(exposure_secs))
    if int(max(1, r)) > 1:
        parts.append(f"{int(r)}x")
    return "_".join(parts)

def _sr_out_path(out_path: str, r: int) -> Path:
    """
    Build: MFDeconv_<base>[_<HxW>][_<secs>s][_2x], preserving REAL extensions.
    """
    p = Path(out_path)
    body, real_ext = _split_known_exts(p)

    # harvest metadata from the whole body (not Path.stem)
    size = _extract_size(body)
    ex_sec = _extract_exposure_secs(body)

    # clean base
    base = _strip_metadata_from_base(body)

    new_stem = _canonical_out_name_prefix(base, r=int(max(1, r)), size=size, exposure_secs=ex_sec, tag="MFDeconv")
    return p.with_name(f"{new_stem}{real_ext}")

def _nonclobber_path(path: str) -> str:
    """
    Version collisions as '_v2', '_v3', ... (no spaces/parentheses).
    """
    p = Path(path)
    if not p.exists():
        return str(p)

    # keep the true extension(s)
    body, real_ext = _split_known_exts(p)

    # if already has _vN, bump it
    m = re.search(r"(.*)_v(\d+)$", body)
    if m:
        base = m.group(1); n = int(m.group(2)) + 1
    else:
        base = body; n = 2

    while True:
        candidate = p.with_name(f"{base}_v{n}{real_ext}")
        if not candidate.exists():
            return str(candidate)
        n += 1

def _iter_folder(basefile: str) -> str:
    d, fname = os.path.split(basefile)
    root, ext = os.path.splitext(fname)
    tgt = os.path.join(d, f"{root}.iters")
    if not os.path.exists(tgt):
        try:
            os.makedirs(tgt, exist_ok=True)
        except Exception:
            # last resort: suffix (n)
            n = 1
            while True:
                cand = os.path.join(d, f"{root}.iters ({n})")
                try:
                    os.makedirs(cand, exist_ok=True)
                    return cand
                except Exception:
                    n += 1
    return tgt

def _save_iter_image(arr, hdr_base, folder, tag, color_mode):
    """
    arr: numpy array (H,W) or (C,H,W) float32
    tag: 'seed' or 'iter_###'
    """
    if arr.ndim == 3 and arr.shape[0] not in (1, 3) and arr.shape[-1] in (1, 3):
        arr = np.moveaxis(arr, -1, 0)
    if arr.ndim == 3 and arr.shape[0] == 1:
        arr = arr[0]

    hdr = fits.Header(hdr_base) if isinstance(hdr_base, fits.Header) else fits.Header()
    hdr['MF_PART'] = (str(tag), 'MFDeconv intermediate (seed/iter)')
    hdr['MF_COLOR'] = (str(color_mode), 'Color mode used')
    path = os.path.join(folder, f"{tag}.fit")
    # overwrite allowed inside the dedicated folder
    fits.PrimaryHDU(data=arr.astype(np.float32, copy=False), header=hdr).writeto(path, overwrite=True)
    return path


def _process_gui_events_safely():
    app = QApplication.instance()
    if app and QThread.currentThread() is app.thread():
        app.processEvents()

EPS = 1e-6

# -----------------------------
# Helpers: image prep / shapes
# -----------------------------

# new: lightweight loader that yields one frame at a time

def _to_luma_local(a: np.ndarray) -> np.ndarray:
    a = np.asarray(a, dtype=np.float32)
    if a.ndim == 2:
        return a
    if a.ndim == 3:
        # mono fast paths
        if a.shape[-1] == 1:      # HWC mono
            return a[..., 0].astype(np.float32, copy=False)
        if a.shape[0] == 1:       # CHW mono
            return a[0].astype(np.float32, copy=False)
        # RGB
        if a.shape[-1] == 3:      # HWC RGB
            r, g, b = a[..., 0], a[..., 1], a[..., 2]
            return (0.2126*r + 0.7152*g + 0.0722*b).astype(np.float32, copy=False)
        if a.shape[0] == 3:       # CHW RGB
            r, g, b = a[0], a[1], a[2]
            return (0.2126*r + 0.7152*g + 0.0722*b).astype(np.float32, copy=False)
    # fallback: average last axis
    return a.mean(axis=-1).astype(np.float32, copy=False)

def _normalize_layout_single(a, color_mode):
    """
    Coerce to:
      - 'luma'       -> (H, W)
      - 'perchannel' -> (C, H, W); mono stays (1,H,W), RGB → (3,H,W)
    Accepts (H,W), (H,W,3), or (3,H,W).
    """
    a = np.asarray(a, dtype=np.float32)

    if color_mode == "luma":
        return _to_luma_local(a)  # returns (H,W)

    # perchannel
    if a.ndim == 2:
        return a[None, ...]  # (1,H,W) ← keep mono as 1 channel
    if a.ndim == 3 and a.shape[-1] == 3:
        return np.moveaxis(a, -1, 0)  # (3,H,W)
    if a.ndim == 3 and a.shape[0] in (1, 3):
        return a  # already (1,H,W) or (3,H,W)
    # fallback: average any weird shape into luma 1×H×W
    l = _to_luma_local(a)
    return l[None, ...]


def _normalize_layout_batch(arrs, color_mode):
    return [_normalize_layout_single(a, color_mode) for a in arrs]

def _common_hw(data_list):
    """Return minimal (H,W) across items; items are (H,W) or (C,H,W)."""
    Hs, Ws = [], []
    for a in data_list:
        if a.ndim == 2:
            H, W = a.shape
        else:
            _, H, W = a.shape
        Hs.append(H); Ws.append(W)
    return int(min(Hs)), int(min(Ws))

def _center_crop(arr, Ht, Wt):
    """Center-crop arr (H,W) or (C,H,W) to (Ht,Wt)."""
    if arr.ndim == 2:
        H, W = arr.shape
        if H == Ht and W == Wt:
            return arr
        y0 = max(0, (H - Ht) // 2)
        x0 = max(0, (W - Wt) // 2)
        return arr[y0:y0+Ht, x0:x0+Wt]
    else:
        C, H, W = arr.shape
        if H == Ht and W == Wt:
            return arr
        y0 = max(0, (H - Ht) // 2)
        x0 = max(0, (W - Wt) // 2)
        return arr[:, y0:y0+Ht, x0:x0+Wt]

def _sanitize_numeric(a):
    """Replace NaN/Inf, clip negatives, make contiguous float32."""
    a = np.nan_to_num(a, nan=0.0, posinf=0.0, neginf=0.0)
    a = np.clip(a, 0.0, None).astype(np.float32, copy=False)
    return np.ascontiguousarray(a)

# -----------------------------
# PSF utilities
# -----------------------------

def _gaussian_psf(fwhm_px: float, ksize: int) -> np.ndarray:
    sigma = max(fwhm_px, 1.0) / 2.3548
    r = (ksize - 1) / 2
    y, x = np.mgrid[-r:r+1, -r:r+1]
    g = np.exp(-(x*x + y*y) / (2*sigma*sigma))
    g /= (np.sum(g) + EPS)
    return g.astype(np.float32, copy=False)
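# The 2.3548 factor above encodes FWHM = 2*sqrt(2*ln 2)*sigma ≈ 2.3548*sigma for a
# Gaussian, so _gaussian_psf converts the requested FWHM (in pixels) to the kernel sigma.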
|
|
935
|
+
|
|
936
|
+
def _estimate_fwhm_from_header(hdr) -> float:
|
|
937
|
+
for key in ("FWHM", "FWHM_PIX", "PSF_FWHM"):
|
|
938
|
+
if key in hdr:
|
|
939
|
+
try:
|
|
940
|
+
val = float(hdr[key])
|
|
941
|
+
if np.isfinite(val) and val > 0:
|
|
942
|
+
return val
|
|
943
|
+
except Exception:
|
|
944
|
+
pass
|
|
945
|
+
return float("nan")
|
|
946
|
+
|
|
947
|
+
def _estimate_fwhm_from_image(arr) -> float:
|
|
948
|
+
"""Fast FWHM estimate from SEP 'a','b' parameters (≈ sigma in px)."""
|
|
949
|
+
if sep is None:
|
|
950
|
+
return float("nan")
|
|
951
|
+
try:
|
|
952
|
+
img = _contig(_to_luma_local(arr)) # ← ensure C-contig float32
|
|
953
|
+
bkg = sep.Background(img)
|
|
954
|
+
data = _contig(img - bkg.back()) # ← ensure data is C-contig
|
|
955
|
+
try:
|
|
956
|
+
err = bkg.globalrms
|
|
957
|
+
except Exception:
|
|
958
|
+
err = float(np.median(bkg.rms()))
|
|
959
|
+
sources = sep.extract(data, 6.0, err=err)
|
|
960
|
+
if sources is None or len(sources) == 0:
|
|
961
|
+
return float("nan")
|
|
962
|
+
a = np.asarray(sources["a"], dtype=np.float32)
|
|
963
|
+
b = np.asarray(sources["b"], dtype=np.float32)
|
|
964
|
+
ab = (a + b) * 0.5
|
|
965
|
+
sigma = float(np.median(ab[np.isfinite(ab) & (ab > 0)]))
|
|
966
|
+
if not np.isfinite(sigma) or sigma <= 0:
|
|
967
|
+
return float("nan")
|
|
968
|
+
return 2.3548 * sigma
|
|
969
|
+
except Exception:
|
|
970
|
+
return float("nan")
|
|
971
|
+
|
|
972
|
+
def _auto_ksize_from_fwhm(fwhm_px: float, kmin: int = 11, kmax: int = 51) -> int:
|
|
973
|
+
"""
|
|
974
|
+
Choose odd kernel size to cover about ±4σ.
|
|
975
|
+
"""
|
|
976
|
+
sigma = max(fwhm_px, 1.0) / 2.3548
|
|
977
|
+
r = int(math.ceil(4.0 * sigma))
|
|
978
|
+
k = 2 * r + 1
|
|
979
|
+
k = max(kmin, min(k, kmax))
|
|
980
|
+
if (k % 2) == 0:
|
|
981
|
+
k += 1
|
|
982
|
+
return k
|
|
983
|
+
|
|
984
|
+
def _flip_kernel(psf):
|
|
985
|
+
# PyTorch dislikes negative strides; make it contiguous.
|
|
986
|
+
return np.flip(np.flip(psf, -1), -2).copy()
|
|
987
|
+
|
|
988
|
+
def _conv_same_np(img, psf):
|
|
989
|
+
# img: (H,W) or (C,H,W) numpy
|
|
990
|
+
import numpy.fft as fft
|
|
991
|
+
def fftconv2(a, k):
|
|
992
|
+
H, W = a.shape[-2:]
|
|
993
|
+
kh, kw = k.shape
|
|
994
|
+
pad_h, pad_w = H + kh - 1, W + kw - 1
|
|
995
|
+
A = fft.rfftn(a, s=(pad_h, pad_w), axes=(-2, -1))
|
|
996
|
+
K = fft.rfftn(k, s=(pad_h, pad_w), axes=(-2, -1))
|
|
997
|
+
Y = A * K
|
|
998
|
+
y = fft.irfftn(Y, s=(pad_h, pad_w), axes=(-2, -1))
|
|
999
|
+
sh, sw = (kh - 1)//2, (kw - 1)//2
|
|
1000
|
+
return y[..., sh:sh+H, sw:sw+W]
|
|
1001
|
+
if img.ndim == 2:
|
|
1002
|
+
return fftconv2(img[None], psf)[0]
|
|
1003
|
+
else:
|
|
1004
|
+
return np.stack([fftconv2(img[c:c+1], psf)[0] for c in range(img.shape[0])], axis=0)
|
|
1005
|
+
|
|
1006
|
+
def _normalize_psf(psf):
|
|
1007
|
+
psf = np.maximum(psf, 0.0).astype(np.float32, copy=False)
|
|
1008
|
+
s = float(psf.sum())
|
|
1009
|
+
if not np.isfinite(s) or s <= EPS:
|
|
1010
|
+
return psf
|
|
1011
|
+
return (psf / s).astype(np.float32, copy=False)
|
|
1012
|
+
|
|
1013
|
+
def _soften_psf(psf, sigma_px=0.25):
|
|
1014
|
+
# optional tiny Gaussian soften to reduce ringing; sigma<=0 disables
|
|
1015
|
+
if sigma_px <= 0:
|
|
1016
|
+
return psf
|
|
1017
|
+
r = int(max(1, round(3 * sigma_px)))
|
|
1018
|
+
y, x = np.mgrid[-r:r+1, -r:r+1]
|
|
1019
|
+
g = np.exp(-(x*x + y*y) / (2 * sigma_px * sigma_px)).astype(np.float32)
|
|
1020
|
+
g /= g.sum() + EPS
|
|
1021
|
+
return _conv_same_np(psf[None], g)[0]
|
|
1022
|
+
|
|
1023
|
+
def _psf_fwhm_px(psf: np.ndarray) -> float:
|
|
1024
|
+
"""Approximate FWHM (pixels) from second moments of a normalized kernel."""
|
|
1025
|
+
psf = np.maximum(psf, 0).astype(np.float32, copy=False)
|
|
1026
|
+
s = float(psf.sum())
|
|
1027
|
+
if s <= EPS:
|
|
1028
|
+
return float("nan")
|
|
1029
|
+
k = psf.shape[0]
|
|
1030
|
+
y, x = np.mgrid[:k, :k].astype(np.float32)
|
|
1031
|
+
cy = float((psf * y).sum() / s)
|
|
1032
|
+
cx = float((psf * x).sum() / s)
|
|
1033
|
+
var_y = float((psf * (y - cy) ** 2).sum() / s)
|
|
1034
|
+
var_x = float((psf * (x - cx) ** 2).sum() / s)
|
|
1035
|
+
sigma = math.sqrt(max(0.0, 0.5 * (var_x + var_y)))
|
|
1036
|
+
return 2.3548 * sigma # FWHM≈2.355σ
|
|
1037
|
+
|
|
1038
|
+
STAR_MASK_MAXSIDE = 2048
|
|
1039
|
+
STAR_MASK_MAXOBJS = 2000 # cap number of objects
|
|
1040
|
+
VARMAP_SAMPLE_STRIDE = 8 # (kept for compat; currently unused internally)
|
|
1041
|
+
THRESHOLD_SIGMA = 2.0
|
|
1042
|
+
KEEP_FLOOR = 0.20
|
|
1043
|
+
GROW_PX = 8
|
|
1044
|
+
MAX_STAR_RADIUS = 16
|
|
1045
|
+
SOFT_SIGMA = 2.0
|
|
1046
|
+
ELLIPSE_SCALE = 1.2
|
|
1047
|
+
|
|
1048
|
+
def _sep_background_precompute(img_2d: np.ndarray, bw: int = 64, bh: int = 64):
|
|
1049
|
+
"""
|
|
1050
|
+
One-time SEP background build; returns (sky_map, rms_map, err_scalar).
|
|
1051
|
+
|
|
1052
|
+
Guarantees:
|
|
1053
|
+
- Always returns a 3-tuple (sky, rms, err)
|
|
1054
|
+
- sky/rms are float32 and same shape as img_2d
|
|
1055
|
+
- Robust to sep missing, sep errors, NaNs/Infs, and tiny frames
|
|
1056
|
+
"""
|
|
1057
|
+
a = np.asarray(img_2d, dtype=np.float32)
|
|
1058
|
+
if a.ndim != 2:
|
|
1059
|
+
# be strict; callers expect 2D
|
|
1060
|
+
raise ValueError(f"_sep_background_precompute expects 2D, got shape={a.shape}")
|
|
1061
|
+
|
|
1062
|
+
H, W = int(a.shape[0]), int(a.shape[1])
|
|
1063
|
+
if H == 0 or W == 0:
|
|
1064
|
+
# should never happen, but don't return empty tuple
|
|
1065
|
+
sky = np.zeros((H, W), dtype=np.float32)
|
|
1066
|
+
rms = np.ones((H, W), dtype=np.float32)
|
|
1067
|
+
return sky, rms, 1.0
|
|
1068
|
+
|
|
1069
|
+
# --- robust fallback builder (works for any input) ---
|
|
1070
|
+
def _fallback():
|
|
1071
|
+
# Use finite-only stats if possible
|
|
1072
|
+
finite = np.isfinite(a)
|
|
1073
|
+
if finite.any():
|
|
1074
|
+
vals = a[finite]
|
|
1075
|
+
med = float(np.median(vals))
|
|
1076
|
+
mad = float(np.median(np.abs(vals - med))) + 1e-6
|
|
1077
|
+
else:
|
|
1078
|
+
med = 0.0
|
|
1079
|
+
mad = 1.0
|
|
1080
|
+
sky = np.full((H, W), med, dtype=np.float32)
|
|
1081
|
+
rms = np.full((H, W), 1.4826 * mad, dtype=np.float32)
|
|
1082
|
+
err = float(np.median(rms))
|
|
1083
|
+
return sky, rms, err
|
|
1084
|
+
|
|
1085
|
+
# If sep isn't available, always fallback
|
|
1086
|
+
if sep is None:
|
|
1087
|
+
return _fallback()
|
|
1088
|
+
|
|
1089
|
+
# SEP is present: sanitize input and clamp tile sizes
|
|
1090
|
+
# sep can choke on NaNs/Infs
|
|
1091
|
+
if not np.isfinite(a).all():
|
|
1092
|
+
# replace non-finite with median of finite values (or 0)
|
|
1093
|
+
finite = np.isfinite(a)
|
|
1094
|
+
fill = float(np.median(a[finite])) if finite.any() else 0.0
|
|
1095
|
+
a = np.where(finite, a, fill).astype(np.float32, copy=False)
|
|
1096
|
+
|
|
1097
|
+
a = np.ascontiguousarray(a, dtype=np.float32)
|
|
1098
|
+
|
|
1099
|
+
# Clamp bw/bh to image size; SEP doesn't like bw/bh > dims
|
|
1100
|
+
bw = int(max(8, min(int(bw), W)))
|
|
1101
|
+
bh = int(max(8, min(int(bh), H)))
|
|
1102
|
+
|
|
1103
|
+
try:
|
|
1104
|
+
b = sep.Background(a, bw=bw, bh=bh, fw=3, fh=3)
|
|
1105
|
+
|
|
1106
|
+
sky = np.asarray(b.back(), dtype=np.float32)
|
|
1107
|
+
rms = np.asarray(b.rms(), dtype=np.float32)
|
|
1108
|
+
|
|
1109
|
+
# Ensure shape sanity (SEP should match, but be paranoid)
|
|
1110
|
+
if sky.shape != a.shape or rms.shape != a.shape:
|
|
1111
|
+
return _fallback()
|
|
1112
|
+
|
|
1113
|
+
# globalrms sometimes isn't available depending on SEP build
|
|
1114
|
+
err = float(getattr(b, "globalrms", np.nan))
|
|
1115
|
+
if not np.isfinite(err) or err <= 0:
|
|
1116
|
+
# robust scalar: median rms
|
|
1117
|
+
err = float(np.median(rms)) if rms.size else 1.0
|
|
1118
|
+
|
|
1119
|
+
return sky, rms, err
|
|
1120
|
+
|
|
1121
|
+
except Exception:
|
|
1122
|
+
# If SEP blows up for any reason, degrade gracefully
|
|
1123
|
+
return _fallback()
|
|
1124
|
+
|
|
1125
|
+
|
|
1126
|
+
|
|
1127
|
+
def _star_mask_from_precomputed(
|
|
1128
|
+
img_2d: np.ndarray,
|
|
1129
|
+
sky_map: np.ndarray,
|
|
1130
|
+
err_scalar: float,
|
|
1131
|
+
*,
|
|
1132
|
+
thresh_sigma: float,
|
|
1133
|
+
max_objs: int,
|
|
1134
|
+
grow_px: int,
|
|
1135
|
+
ellipse_scale: float,
|
|
1136
|
+
soft_sigma: float,
|
|
1137
|
+
max_radius_px: int,
|
|
1138
|
+
keep_floor: float,
|
|
1139
|
+
max_side: int,
|
|
1140
|
+
status_cb=lambda s: None
|
|
1141
|
+
) -> np.ndarray:
|
|
1142
|
+
"""
|
|
1143
|
+
Build a KEEP weight map using a *downscaled detection / full-res draw* path.
|
|
1144
|
+
**Never writes to img_2d**; all drawing happens in a fresh `mask_u8`.
|
|
1145
|
+
"""
|
|
1146
|
+
# Optional OpenCV fast path
|
|
1147
|
+
try:
|
|
1148
|
+
import cv2 as _cv2
|
|
1149
|
+
_HAS_CV2 = True
|
|
1150
|
+
except Exception:
|
|
1151
|
+
_HAS_CV2 = False
|
|
1152
|
+
_cv2 = None # type: ignore
|
|
1153
|
+
|
|
1154
|
+
H, W = map(int, img_2d.shape)
|
|
1155
|
+
|
|
1156
|
+
# Residual for detection (contiguous, separate buffer)
|
|
1157
|
+
data_sub = np.ascontiguousarray((img_2d - sky_map).astype(np.float32))
|
|
1158
|
+
|
|
1159
|
+
# Downscale *detection only* to speed up, never the draw step
|
|
1160
|
+
det = data_sub
|
|
1161
|
+
scale = 1.0
|
|
1162
|
+
if max_side and max(H, W) > int(max_side):
|
|
1163
|
+
scale = float(max(H, W)) / float(max_side)
|
|
1164
|
+
if _HAS_CV2:
|
|
1165
|
+
det = _cv2.resize(
|
|
1166
|
+
det,
|
|
1167
|
+
(max(1, int(round(W / scale))), max(1, int(round(H / scale)))),
|
|
1168
|
+
interpolation=_cv2.INTER_AREA
|
|
1169
|
+
)
|
|
1170
|
+
else:
|
|
1171
|
+
s = int(max(1, round(scale)))
|
|
1172
|
+
det = det[:(H // s) * s, :(W // s) * s].reshape(H // s, s, W // s, s).mean(axis=(1, 3))
|
|
1173
|
+
scale = float(s)
|
|
1174
|
+
|
|
1175
|
+
# Threshold ladder
|
|
1176
|
+
thresholds = [thresh_sigma, thresh_sigma*2, thresh_sigma*4,
|
|
1177
|
+
thresh_sigma*8, thresh_sigma*16]
|
|
1178
|
+
objs = None; used = float("nan"); raw = 0
|
|
1179
|
+
for t in thresholds:
|
|
1180
|
+
cand = sep.extract(det, thresh=float(t), err=float(err_scalar))
|
|
1181
|
+
n = 0 if cand is None else len(cand)
|
|
1182
|
+
if n == 0: continue
|
|
1183
|
+
if n > max_objs*12: continue
|
|
1184
|
+
objs, raw, used = cand, n, float(t)
|
|
1185
|
+
break
|
|
1186
|
+
|
|
1187
|
+
if objs is None or len(objs) == 0:
|
|
1188
|
+
try:
|
|
1189
|
+
cand = sep.extract(det, thresh=thresholds[-1], err=float(err_scalar), minarea=9)
|
|
1190
|
+
except Exception:
|
|
1191
|
+
cand = None
|
|
1192
|
+
if cand is None or len(cand) == 0:
|
|
1193
|
+
status_cb("Star mask: no sources found (mask disabled for this frame).")
|
|
1194
|
+
return np.ones((H, W), dtype=np.float32, order="C")
|
|
1195
|
+
objs, raw, used = cand, len(cand), float(thresholds[-1])
|
|
1196
|
+
|
|
1197
|
+
# Brightest max_objs
|
|
1198
|
+
if "flux" in objs.dtype.names:
|
|
1199
|
+
idx = np.argsort(objs["flux"])[-int(max_objs):]
|
|
1200
|
+
objs = objs[idx]
|
|
1201
|
+
else:
|
|
1202
|
+
objs = objs[:int(max_objs)]
|
|
1203
|
+
kept = len(objs)
|
|
1204
|
+
|
|
1205
|
+
# ---- draw back on full-res into a brand-new buffer ----
|
|
1206
|
+
mask_u8 = np.zeros((H, W), dtype=np.uint8, order="C")
|
|
1207
|
+
s_back = float(scale)
|
|
1208
|
+
MR = int(max(1, max_radius_px))
|
|
1209
|
+
G = int(max(0, grow_px))
|
|
1210
|
+
ES = float(max(0.1, ellipse_scale))
|
|
1211
|
+
|
|
1212
|
+
drawn = 0
|
|
1213
|
+
if _HAS_CV2:
|
|
1214
|
+
for o in objs:
|
|
1215
|
+
x = int(round(float(o["x"]) * s_back))
|
|
1216
|
+
y = int(round(float(o["y"]) * s_back))
|
|
1217
|
+
if not (0 <= x < W and 0 <= y < H):
|
|
1218
|
+
continue
|
|
1219
|
+
a = float(o["a"]) * s_back
|
|
1220
|
+
b = float(o["b"]) * s_back
|
|
1221
|
+
r = int(math.ceil(ES * max(a, b)))
|
|
1222
|
+
r = min(max(r, 0) + G, MR)
|
|
1223
|
+
if r <= 0:
|
|
1224
|
+
continue
|
|
1225
|
+
_cv2.circle(mask_u8, (x, y), r, 1, thickness=-1, lineType=_cv2.LINE_8)
|
|
1226
|
+
drawn += 1
|
|
1227
|
+
else:
|
|
1228
|
+
for o in objs:
|
|
1229
|
+
x = int(round(float(o["x"]) * s_back))
|
|
1230
|
+
y = int(round(float(o["y"]) * s_back))
|
|
1231
|
+
if not (0 <= x < W and 0 <= y < H):
|
|
1232
|
+
continue
|
|
1233
|
+
a = float(o["a"]) * s_back
|
|
1234
|
+
b = float(o["b"]) * s_back
|
|
1235
|
+
r = int(math.ceil(ES * max(a, b)))
|
|
1236
|
+
r = min(max(r, 0) + G, MR)
|
|
1237
|
+
if r <= 0:
|
|
1238
|
+
continue
|
|
1239
|
+
y0 = max(0, y - r); y1 = min(H, y + r + 1)
|
|
1240
|
+
x0 = max(0, x - r); x1 = min(W, x + r + 1)
|
|
1241
|
+
yy, xx = np.ogrid[y0:y1, x0:x1]
|
|
1242
|
+
disk = (yy - y)*(yy - y) + (xx - x)*(xx - x) <= r*r
|
|
1243
|
+
mask_u8[y0:y1, x0:x1][disk] = 1
|
|
1244
|
+
drawn += 1
|
|
1245
|
+
|
|
1246
|
+
# Feather + convert to keep weights
|
|
1247
|
+
m = mask_u8.astype(np.float32, copy=False)
|
|
1248
|
+
if soft_sigma > 0:
|
|
1249
|
+
try:
|
|
1250
|
+
if _HAS_CV2:
|
|
1251
|
+
k = int(max(1, int(round(3*soft_sigma)))*2 + 1)
|
|
1252
|
+
m = _cv2.GaussianBlur(m, (k, k), float(soft_sigma),
|
|
1253
|
+
borderType=_cv2.BORDER_REFLECT)
|
|
1254
|
+
else:
|
|
1255
|
+
from scipy.ndimage import gaussian_filter
|
|
1256
|
+
m = gaussian_filter(m, sigma=float(soft_sigma), mode="reflect")
|
|
1257
|
+
except Exception:
|
|
1258
|
+
pass
|
|
1259
|
+
np.clip(m, 0.0, 1.0, out=m)
|
|
1260
|
+
|
|
1261
|
+
keep = 1.0 - m
|
|
1262
|
+
kf = float(max(0.0, min(0.99, keep_floor)))
|
|
1263
|
+
keep = kf + (1.0 - kf) * keep
|
|
1264
|
+
np.clip(keep, 0.0, 1.0, out=keep)
|
|
1265
|
+
|
|
1266
|
+
status_cb(f"Star mask: thresh={used:.3g} | detected={raw} | kept={kept} | drawn={drawn} | keep_floor={keep_floor}")
|
|
1267
|
+
return np.ascontiguousarray(keep, dtype=np.float32)
|
|
1268
|
+
|
|
1269
|
+
|
|
1270
|
+
def _variance_map_from_precomputed(
|
|
1271
|
+
img_2d: np.ndarray,
|
|
1272
|
+
sky_map: np.ndarray,
|
|
1273
|
+
rms_map: np.ndarray,
|
|
1274
|
+
hdr,
|
|
1275
|
+
*,
|
|
1276
|
+
smooth_sigma: float,
|
|
1277
|
+
floor: float,
|
|
1278
|
+
status_cb=lambda s: None
|
|
1279
|
+
) -> np.ndarray:
|
|
1280
|
+
img = np.clip(np.asarray(img_2d, dtype=np.float32), 0.0, None)
|
|
1281
|
+
var_bg_dn2 = np.maximum(rms_map, 1e-6) ** 2
|
|
1282
|
+
obj_dn = np.clip(img - sky_map, 0.0, None)
|
|
1283
|
+
|
|
1284
|
+
gain = None
|
|
1285
|
+
for k in ("EGAIN", "GAIN", "GAIN1", "GAIN2"):
|
|
1286
|
+
if k in hdr:
|
|
1287
|
+
try:
|
|
1288
|
+
g = float(hdr[k]); gain = g if (np.isfinite(g) and g > 0) else None
|
|
1289
|
+
if gain is not None: break
|
|
1290
|
+
except Exception as e:
|
|
1291
|
+
import logging
|
|
1292
|
+
logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
|
|
1293
|
+
|
|
1294
|
+
if gain is not None:
|
|
1295
|
+
a_shot = 1.0 / gain
|
|
1296
|
+
else:
|
|
1297
|
+
sky_med = float(np.median(sky_map))
|
|
1298
|
+
varbg_med= float(np.median(var_bg_dn2))
|
|
1299
|
+
a_shot = (varbg_med / sky_med) if sky_med > 1e-6 else 0.0
|
|
1300
|
+
a_shot = float(np.clip(a_shot, 0.0, 10.0))
|
|
1301
|
+
|
|
1302
|
+
v = var_bg_dn2 + a_shot * obj_dn
|
|
1303
|
+
if smooth_sigma > 0:
|
|
1304
|
+
try:
|
|
1305
|
+
import cv2 as _cv2
|
|
1306
|
+
k = int(max(1, int(round(3*smooth_sigma)))*2 + 1)
|
|
1307
|
+
v = _cv2.GaussianBlur(v, (k,k), float(smooth_sigma), borderType=_cv2.BORDER_REFLECT)
|
|
1308
|
+
except Exception:
|
|
1309
|
+
try:
|
|
1310
|
+
from scipy.ndimage import gaussian_filter
|
|
1311
|
+
v = gaussian_filter(v, sigma=float(smooth_sigma), mode="reflect")
|
|
1312
|
+
except Exception:
|
|
1313
|
+
pass
|
|
1314
|
+
|
|
1315
|
+
np.clip(v, float(floor), None, out=v)
|
|
1316
|
+
try:
|
|
1317
|
+
rms_med = float(np.median(np.sqrt(var_bg_dn2)))
|
|
1318
|
+
status_cb(f"Variance map: sky_med={float(np.median(sky_map)):.3g} DN | rms_med={rms_med:.3g} DN | smooth_sigma={smooth_sigma} | floor={floor}")
|
|
1319
|
+
except Exception:
|
|
1320
|
+
pass
|
|
1321
|
+
return v.astype(np.float32, copy=False)
|
|
1322
|
+
|
|
1323
|
+
|
|
1324
|
+
|
|
1325
|
+
# -----------------------------
|
|
1326
|
+
# Robust weighting (Huber)
|
|
1327
|
+
# -----------------------------
|
|
1328
|
+
|
|
1329
|
+
def _estimate_scalar_variance_t(r):
|
|
1330
|
+
# r: tensor on device
|
|
1331
|
+
med = torch.median(r)
|
|
1332
|
+
mad = torch.median(torch.abs(r - med)) + 1e-6
|
|
1333
|
+
return (1.4826 * mad) ** 2
|
|
1334
|
+
|
|
1335
|
+
def _estimate_scalar_variance(a):
|
|
1336
|
+
med = np.median(a)
|
|
1337
|
+
mad = np.median(np.abs(a - med)) + 1e-6
|
|
1338
|
+
return float((1.4826 * mad) ** 2)
|
|
1339
|
+
|
|
1340
|
+
def _weight_map(y, pred, huber_delta, var_map=None, mask=None):
|
|
1341
|
+
"""
|
|
1342
|
+
Robust per-pixel weights for the MM update.
|
|
1343
|
+
W = [psi(r)/r] * 1/(var + eps) * mask
|
|
1344
|
+
If huber_delta < 0, delta = (-huber_delta) * RMS(residual) (auto).
|
|
1345
|
+
var_map: per-pixel variance (2D); if None, fall back to robust scalar via MAD.
|
|
1346
|
+
mask: 2D {0,1} validity; if None, treat as ones.
|
|
1347
|
+
"""
|
|
1348
|
+
r = y - pred
|
|
1349
|
+
eps = EPS
|
|
1350
|
+
|
|
1351
|
+
# resolve Huber delta
|
|
1352
|
+
if huber_delta < 0:
|
|
1353
|
+
if TORCH_OK and isinstance(r, torch.Tensor):
|
|
1354
|
+
med = torch.median(r)
|
|
1355
|
+
mad = torch.median(torch.abs(r - med)) + 1e-6
|
|
1356
|
+
rms = 1.4826 * mad
|
|
1357
|
+
delta = (-huber_delta) * torch.clamp(rms, min=1e-6)
|
|
1358
|
+
else:
|
|
1359
|
+
med = np.median(r)
|
|
1360
|
+
mad = np.median(np.abs(r - med)) + 1e-6
|
|
1361
|
+
rms = 1.4826 * mad
|
|
1362
|
+
delta = (-huber_delta) * max(rms, 1e-6)
|
|
1363
|
+
else:
|
|
1364
|
+
delta = huber_delta
|
|
1365
|
+
|
|
1366
|
+
# psi(r)/r
|
|
1367
|
+
if TORCH_OK and isinstance(r, torch.Tensor):
|
|
1368
|
+
absr = torch.abs(r)
|
|
1369
|
+
if float(delta) > 0:
|
|
1370
|
+
psi_over_r = torch.where(absr <= delta, torch.ones_like(r), delta / (absr + eps))
|
|
1371
|
+
else:
|
|
1372
|
+
psi_over_r = torch.ones_like(r)
|
|
1373
|
+
if var_map is None:
|
|
1374
|
+
v = _estimate_scalar_variance_t(r)
|
|
1375
|
+
else:
|
|
1376
|
+
v = var_map
|
|
1377
|
+
if v.ndim == 2 and r.ndim == 3:
|
|
1378
|
+
v = v[None, ...] # broadcast over channels
|
|
1379
|
+
w = psi_over_r / (v + eps)
|
|
1380
|
+
if mask is not None:
|
|
1381
|
+
m = mask if mask.ndim == w.ndim else (mask[None, ...] if w.ndim == 3 else mask)
|
|
1382
|
+
w = w * m
|
|
1383
|
+
return w
|
|
1384
|
+
else:
|
|
1385
|
+
absr = np.abs(r)
|
|
1386
|
+
if float(delta) > 0:
|
|
1387
|
+
psi_over_r = np.where(absr <= delta, 1.0, delta / (absr + eps)).astype(np.float32)
|
|
1388
|
+
else:
|
|
1389
|
+
psi_over_r = np.ones_like(r, dtype=np.float32)
|
|
1390
|
+
if var_map is None:
|
|
1391
|
+
v = _estimate_scalar_variance(r)
|
|
1392
|
+
else:
|
|
1393
|
+
v = var_map
|
|
1394
|
+
if v.ndim == 2 and r.ndim == 3:
|
|
1395
|
+
v = v[None, ...]
|
|
1396
|
+
w = psi_over_r / (v + eps)
|
|
1397
|
+
if mask is not None:
|
|
1398
|
+
m = mask if mask.ndim == w.ndim else (mask[None, ...] if w.ndim == 3 else mask)
|
|
1399
|
+
w = w * m
|
|
1400
|
+
return w
|
|
1401
|
+
|
|
1402
|
+
|
|
1403
|
+
# -----------------------------
|
|
1404
|
+
# Torch / conv
|
|
1405
|
+
# -----------------------------
|
|
1406
|
+
|
|
1407
|
+
def _fftshape_same(H, W, kh, kw):
|
|
1408
|
+
return H + kh - 1, W + kw - 1
|
|
1409
|
+
|
|
1410
|
+
# ---------- Torch FFT helpers (FIXED: carry padH/padW) ----------
|
|
1411
|
+
def _precompute_torch_psf_ffts(psfs, flip_psf, H, W, device, dtype):
|
|
1412
|
+
tfft = torch.fft
|
|
1413
|
+
psf_fft, psfT_fft = [], []
|
|
1414
|
+
for k, kT in zip(psfs, flip_psf):
|
|
1415
|
+
kh, kw = k.shape
|
|
1416
|
+
padH, padW = _fftshape_same(H, W, kh, kw)
|
|
1417
|
+
|
|
1418
|
+
# shift the small kernels to the origin, then FFT into padded size
|
|
1419
|
+
k_small = torch.as_tensor(np.fft.ifftshift(k), device=device, dtype=dtype)
|
|
1420
|
+
kT_small = torch.as_tensor(np.fft.ifftshift(kT), device=device, dtype=dtype)
|
|
1421
|
+
|
|
1422
|
+
Kf = tfft.rfftn(k_small, s=(padH, padW))
|
|
1423
|
+
KTf = tfft.rfftn(kT_small, s=(padH, padW))
|
|
1424
|
+
|
|
1425
|
+
psf_fft.append((Kf, padH, padW, kh, kw))
|
|
1426
|
+
psfT_fft.append((KTf, padH, padW, kh, kw))
|
|
1427
|
+
return psf_fft, psfT_fft
|
|
1428
|
+
|
|
1429
|
+
|
|
1430
|
+
|
|
1431
|
+
# ---------- NumPy FFT helpers ----------
|
|
1432
|
+
def _precompute_np_psf_ffts(psfs, flip_psf, H, W):
|
|
1433
|
+
import numpy.fft as fft
|
|
1434
|
+
meta, Kfs, KTfs = [], [], []
|
|
1435
|
+
for k, kT in zip(psfs, flip_psf):
|
|
1436
|
+
kh, kw = k.shape
|
|
1437
|
+
fftH, fftW = _fftshape_same(H, W, kh, kw)
|
|
1438
|
+
Kfs.append( fft.rfftn(np.fft.ifftshift(k), s=(fftH, fftW)) )
|
|
1439
|
+
KTfs.append(fft.rfftn(np.fft.ifftshift(kT), s=(fftH, fftW)) )
|
|
1440
|
+
meta.append((kh, kw, fftH, fftW))
|
|
1441
|
+
return Kfs, KTfs, meta
|
|
1442
|
+
|
|
1443
|
+
def _fft_conv_same_np(a, Kf, kh, kw, fftH, fftW, out):
|
|
1444
|
+
import numpy.fft as fft
|
|
1445
|
+
if a.ndim == 2:
|
|
1446
|
+
A = fft.rfftn(a, s=(fftH, fftW))
|
|
1447
|
+
y = fft.irfftn(A * Kf, s=(fftH, fftW))
|
|
1448
|
+
sh, sw = kh // 2, kw // 2
|
|
1449
|
+
out[...] = y[sh:sh+a.shape[0], sw:sw+a.shape[1]]
|
|
1450
|
+
return out
|
|
1451
|
+
else:
|
|
1452
|
+
C, H, W = a.shape
|
|
1453
|
+
acc = []
|
|
1454
|
+
for c in range(C):
|
|
1455
|
+
A = fft.rfftn(a[c], s=(fftH, fftW))
|
|
1456
|
+
y = fft.irfftn(A * Kf, s=(fftH, fftW))
|
|
1457
|
+
sh, sw = kh // 2, kw // 2
|
|
1458
|
+
acc.append(y[sh:sh+H, sw:sw+W])
|
|
1459
|
+
out[...] = np.stack(acc, 0)
|
|
1460
|
+
return out
|
|
1461
|
+
|
|
1462
|
+
|
|
1463
|
+
|
|
1464
|
+
def _torch_device():
|
|
1465
|
+
if TORCH_OK and (torch is not None):
|
|
1466
|
+
if hasattr(torch, "cuda") and torch.cuda.is_available():
|
|
1467
|
+
return torch.device("cuda")
|
|
1468
|
+
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
|
1469
|
+
return torch.device("mps")
|
|
1470
|
+
# DirectML: we passed dml_device from outer scope; keep a module-global
|
|
1471
|
+
if globals().get("dml_ok", False) and globals().get("dml_device", None) is not None:
|
|
1472
|
+
return globals()["dml_device"]
|
|
1473
|
+
return torch.device("cpu")
|
|
1474
|
+
|
|
1475
|
+
def _to_t(x: np.ndarray):
|
|
1476
|
+
if not (TORCH_OK and (torch is not None)):
|
|
1477
|
+
raise RuntimeError("Torch path requested but torch is unavailable")
|
|
1478
|
+
device = _torch_device()
|
|
1479
|
+
t = torch.from_numpy(x)
|
|
1480
|
+
# DirectML wants explicit .to(device)
|
|
1481
|
+
return t.to(device, non_blocking=True) if str(device) != "cpu" else t
|
|
1482
|
+
|
|
1483
|
+
def _contig(x):
|
|
1484
|
+
return np.ascontiguousarray(x, dtype=np.float32)
|
|
1485
|
+
|
|
1486
|
+
def _conv_same_torch(img_t, psf_t):
|
|
1487
|
+
"""
|
|
1488
|
+
img_t: torch tensor on DEVICE, (H,W) or (C,H,W)
|
|
1489
|
+
psf_t: torch tensor on DEVICE, (1,1,kh,kw) (single kernel)
|
|
1490
|
+
Pads with 'reflect' to avoid zero-padding ringing.
|
|
1491
|
+
"""
|
|
1492
|
+
kh, kw = psf_t.shape[-2:]
|
|
1493
|
+
pad = (kw // 2, kw - kw // 2 - 1, # left, right
|
|
1494
|
+
kh // 2, kh - kh // 2 - 1) # top, bottom
|
|
1495
|
+
|
|
1496
|
+
if img_t.ndim == 2:
|
|
1497
|
+
x = img_t[None, None]
|
|
1498
|
+
x = torch.nn.functional.pad(x, pad, mode="reflect")
|
|
1499
|
+
y = torch.nn.functional.conv2d(x, psf_t, padding=0)
|
|
1500
|
+
return y[0, 0]
|
|
1501
|
+
else:
|
|
1502
|
+
C = img_t.shape[0]
|
|
1503
|
+
x = img_t[None]
|
|
1504
|
+
x = torch.nn.functional.pad(x, pad, mode="reflect")
|
|
1505
|
+
w = psf_t.repeat(C, 1, 1, 1)
|
|
1506
|
+
y = torch.nn.functional.conv2d(x, w, padding=0, groups=C)
|
|
1507
|
+
return y[0]
|
|
1508
|
+
|
|
1509
|
+
def _safe_inference_context():
|
|
1510
|
+
"""
|
|
1511
|
+
Return a valid, working no-grad context:
|
|
1512
|
+
- prefer torch.inference_mode() if it exists *and* can be entered,
|
|
1513
|
+
- otherwise fall back to torch.no_grad(),
|
|
1514
|
+
- if torch is unavailable, return NO_GRAD.
|
|
1515
|
+
"""
|
|
1516
|
+
if not (TORCH_OK and (torch is not None)):
|
|
1517
|
+
return NO_GRAD
|
|
1518
|
+
|
|
1519
|
+
cm = getattr(torch, "inference_mode", None)
|
|
1520
|
+
if cm is None:
|
|
1521
|
+
return torch.no_grad
|
|
1522
|
+
|
|
1523
|
+
# Probe inference_mode once; if it explodes on this build, fall back.
|
|
1524
|
+
try:
|
|
1525
|
+
with cm():
|
|
1526
|
+
pass
|
|
1527
|
+
return cm
|
|
1528
|
+
except Exception:
|
|
1529
|
+
return torch.no_grad
|
|
1530
|
+
|
|
1531
|
+
def _ensure_mask_list(masks, data):
|
|
1532
|
+
# 1s where valid, 0s where invalid (soft edges allowed)
|
|
1533
|
+
if masks is None:
|
|
1534
|
+
return [np.ones_like(a if a.ndim==2 else a[0], dtype=np.float32) for a in data]
|
|
1535
|
+
out = []
|
|
1536
|
+
for a, m in zip(data, masks):
|
|
1537
|
+
base = a if a.ndim==2 else a[0] # mask is 2D; shared across channels
|
|
1538
|
+
if m is None:
|
|
1539
|
+
out.append(np.ones_like(base, dtype=np.float32))
|
|
1540
|
+
else:
|
|
1541
|
+
mm = np.asarray(m, dtype=np.float32)
|
|
1542
|
+
if mm.ndim == 3: # tolerate (1,H,W) or (C,H,W)
|
|
1543
|
+
mm = mm[0]
|
|
1544
|
+
if mm.shape != base.shape:
|
|
1545
|
+
# center crop to match (common intersection already applied)
|
|
1546
|
+
Ht, Wt = base.shape
|
|
1547
|
+
mm = _center_crop(mm, Ht, Wt)
|
|
1548
|
+
# keep as float weights in [0,1] (do not threshold!)
|
|
1549
|
+
out.append(np.clip(mm.astype(np.float32, copy=False), 0.0, 1.0))
|
|
1550
|
+
return out
|
|
1551
|
+
|
|
1552
|
+
def _ensure_var_list(variances, data):
|
|
1553
|
+
# If None, we’ll estimate a robust scalar per frame on-the-fly.
|
|
1554
|
+
if variances is None:
|
|
1555
|
+
return [None]*len(data)
|
|
1556
|
+
out = []
|
|
1557
|
+
for a, v in zip(data, variances):
|
|
1558
|
+
if v is None:
|
|
1559
|
+
out.append(None)
|
|
1560
|
+
else:
|
|
1561
|
+
vv = np.asarray(v, dtype=np.float32)
|
|
1562
|
+
if vv.ndim == 3:
|
|
1563
|
+
vv = vv[0]
|
|
1564
|
+
base = a if a.ndim==2 else a[0]
|
|
1565
|
+
if vv.shape != base.shape:
|
|
1566
|
+
Ht, Wt = base.shape
|
|
1567
|
+
vv = _center_crop(vv, Ht, Wt)
|
|
1568
|
+
# clip tiny/negatives
|
|
1569
|
+
vv = np.clip(vv, 1e-8, None).astype(np.float32, copy=False)
|
|
1570
|
+
out.append(vv)
|
|
1571
|
+
return out
|
|
1572
|
+
|
|
1573
|
+
# ---- SR operators (downsample / upsample-sum) ----
|
|
1574
|
+
def _downsample_avg(img, r: int):
|
|
1575
|
+
"""Average-pool over non-overlapping r×r blocks. Works for (H,W) or (C,H,W)."""
|
|
1576
|
+
if r <= 1:
|
|
1577
|
+
return img
|
|
1578
|
+
a = np.asarray(img, dtype=np.float32)
|
|
1579
|
+
if a.ndim == 2:
|
|
1580
|
+
H, W = a.shape
|
|
1581
|
+
Hs, Ws = (H // r) * r, (W // r) * r
|
|
1582
|
+
a = a[:Hs, :Ws].reshape(Hs//r, r, Ws//r, r).mean(axis=(1,3))
|
|
1583
|
+
return a
|
|
1584
|
+
else:
|
|
1585
|
+
C, H, W = a.shape
|
|
1586
|
+
Hs, Ws = (H // r) * r, (W // r) * r
|
|
1587
|
+
a = a[:, :Hs, :Ws].reshape(C, Hs//r, r, Ws//r, r).mean(axis=(2,4))
|
|
1588
|
+
return a
|
|
1589
|
+
|
|
1590
|
+
def _upsample_sum(img, r: int, target_hw: tuple[int,int] | None = None):
|
|
1591
|
+
"""Adjoint of average-pooling: replicate-sum each pixel into an r×r block.
|
|
1592
|
+
For (H,W) or (C,H,W). If target_hw given, center-crop/pad to that size.
|
|
1593
|
+
"""
|
|
1594
|
+
if r <= 1:
|
|
1595
|
+
return img
|
|
1596
|
+
a = np.asarray(img, dtype=np.float32)
|
|
1597
|
+
if a.ndim == 2:
|
|
1598
|
+
H, W = a.shape
|
|
1599
|
+
out = np.kron(a, np.ones((r, r), dtype=np.float32))
|
|
1600
|
+
else:
|
|
1601
|
+
C, H, W = a.shape
|
|
1602
|
+
out = np.stack([np.kron(a[c], np.ones((r, r), dtype=np.float32)) for c in range(C)], axis=0)
|
|
1603
|
+
if target_hw is not None:
|
|
1604
|
+
Ht, Wt = target_hw
|
|
1605
|
+
out = _center_crop(out, Ht, Wt)
|
|
1606
|
+
return out
|
|
1607
|
+
|
|
1608
|
+
def _gaussian2d(ksize: int, sigma: float) -> np.ndarray:
|
|
1609
|
+
r = (ksize - 1) // 2
|
|
1610
|
+
y, x = np.mgrid[-r:r+1, -r:r+1].astype(np.float32)
|
|
1611
|
+
g = np.exp(-(x*x + y*y)/(2.0*sigma*sigma)).astype(np.float32)
|
|
1612
|
+
g /= g.sum() + EPS
|
|
1613
|
+
return g
|
|
1614
|
+
|
|
1615
|
+
def _conv2_same_np(a: np.ndarray, k: np.ndarray) -> np.ndarray:
|
|
1616
|
+
# lightweight wrap for 2D conv on (H,W) or (C,H,W) with same-size output
|
|
1617
|
+
return _conv_same_np(a if a.ndim==3 else a[None], k)[0] if a.ndim==2 else _conv_same_np(a, k)
|
|
1618
|
+
|
|
1619
|
+
def _solve_super_psf_from_native(f_native: np.ndarray, r: int, sigma: float = 1.1,
|
|
1620
|
+
iters: int = 500, lr: float = 0.1) -> np.ndarray:
|
|
1621
|
+
"""
|
|
1622
|
+
Solve: h* = argmin_h || f_native - (D(h) * g_sigma) ||_2^2,
|
|
1623
|
+
where h is (r*k)×(r*k) if f_native is k×k. Returns normalized h (sum=1).
|
|
1624
|
+
"""
|
|
1625
|
+
f = np.asarray(f_native, dtype=np.float32)
|
|
1626
|
+
k = int(f.shape[0]); assert f.shape[0] == f.shape[1]
|
|
1627
|
+
kr = int(k * r)
|
|
1628
|
+
|
|
1629
|
+
# build Gaussian pre-blur at native scale (match paper §4.2)
|
|
1630
|
+
g = _gaussian2d(k, max(sigma, 1e-3)).astype(np.float32)
|
|
1631
|
+
|
|
1632
|
+
# init h by zero-insertion (nearest upsample of f) then deconvolving g very mildly
|
|
1633
|
+
h0 = np.zeros((kr, kr), dtype=np.float32)
|
|
1634
|
+
h0[::r, ::r] = f
|
|
1635
|
+
h0 = _normalize_psf(h0)
|
|
1636
|
+
|
|
1637
|
+
if TORCH_OK:
|
|
1638
|
+
dev = _torch_device()
|
|
1639
|
+
t = torch.tensor(h0, device=dev, dtype=torch.float32, requires_grad=True)
|
|
1640
|
+
f_t = torch.tensor(f, device=dev, dtype=torch.float32)
|
|
1641
|
+
g_t = torch.tensor(g, device=dev, dtype=torch.float32)
|
|
1642
|
+
opt = torch.optim.Adam([t], lr=lr)
|
|
1643
|
+
for _ in range(max(10, iters)):
|
|
1644
|
+
opt.zero_grad(set_to_none=True)
|
|
1645
|
+
H, W = t.shape
|
|
1646
|
+
Hr, Wr = H//r, W//r
|
|
1647
|
+
th = t[:Hr*r, :Wr*r].reshape(Hr, r, Wr, r).mean(dim=(1,3))
|
|
1648
|
+
# conv native: (Dh) * g
|
|
1649
|
+
conv = torch.nn.functional.conv2d(th[None,None], g_t[None,None], padding=g_t.shape[-1]//2)[0,0]
|
|
1650
|
+
loss = torch.mean((conv - f_t)**2)
|
|
1651
|
+
loss.backward()
|
|
1652
|
+
opt.step()
|
|
1653
|
+
with torch.no_grad():
|
|
1654
|
+
t.clamp_(min=0.0)
|
|
1655
|
+
t /= (t.sum() + 1e-8)
|
|
1656
|
+
h = t.detach().cpu().numpy().astype(np.float32)
|
|
1657
|
+
else:
|
|
1658
|
+
# Tiny gradient-descent fallback on numpy
|
|
1659
|
+
h = h0.copy()
|
|
1660
|
+
eta = float(lr)
|
|
1661
|
+
for _ in range(max(50, iters)):
|
|
1662
|
+
Dh = _downsample_avg(h, r)
|
|
1663
|
+
conv = _conv2_same_np(Dh, g)
|
|
1664
|
+
resid = (conv - f)
|
|
1665
|
+
# backprop through conv and D: grad wrt Dh is resid * g^T conv; adjoint of D is upsample-sum
|
|
1666
|
+
grad_Dh = _conv2_same_np(resid, np.flip(np.flip(g, 0), 1))
|
|
1667
|
+
grad_h = _upsample_sum(grad_Dh, r, target_hw=h.shape)
|
|
1668
|
+
h = np.clip(h - eta * grad_h, 0.0, None)
|
|
1669
|
+
s = float(h.sum()); h /= (s + 1e-8)
|
|
1670
|
+
eta *= 0.995
|
|
1671
|
+
return _normalize_psf(h)
|
|
1672
|
+
|
|
1673
|
+
def _downsample_avg_t(x, r: int):
|
|
1674
|
+
"""
|
|
1675
|
+
Average-pool over non-overlapping r×r blocks.
|
|
1676
|
+
Works for (H,W) or (C,H,W). Crops to multiples of r.
|
|
1677
|
+
"""
|
|
1678
|
+
if r <= 1:
|
|
1679
|
+
return x
|
|
1680
|
+
if x.ndim == 2:
|
|
1681
|
+
H, W = x.shape
|
|
1682
|
+
Hr, Wr = (H // r) * r, (W // r) * r
|
|
1683
|
+
if Hr == 0 or Wr == 0:
|
|
1684
|
+
return x # nothing to pool
|
|
1685
|
+
x2 = x[:Hr, :Wr]
|
|
1686
|
+
return x2.view(Hr // r, r, Wr // r, r).mean(dim=(1, 3))
|
|
1687
|
+
else:
|
|
1688
|
+
C, H, W = x.shape
|
|
1689
|
+
Hr, Wr = (H // r) * r, (W // r) * r
|
|
1690
|
+
if Hr == 0 or Wr == 0:
|
|
1691
|
+
return x
|
|
1692
|
+
x2 = x[:, :Hr, :Wr]
|
|
1693
|
+
return x2.view(C, Hr // r, r, Wr // r, r).mean(dim=(2, 4))
|
|
1694
|
+
|
|
1695
|
+
def _upsample_sum_t(x, r: int):
|
|
1696
|
+
if r <= 1:
|
|
1697
|
+
return x
|
|
1698
|
+
if x.ndim == 2:
|
|
1699
|
+
return x.repeat_interleave(r, dim=0).repeat_interleave(r, dim=1)
|
|
1700
|
+
else:
|
|
1701
|
+
return x.repeat_interleave(r, dim=-2).repeat_interleave(r, dim=-1)
|
|
1702
|
+
|
|
1703
|
+
def _sep_bg_rms(frames):
|
|
1704
|
+
"""Return a robust background RMS using SEP's background model on the first frame."""
|
|
1705
|
+
if sep is None or not frames:
|
|
1706
|
+
return None
|
|
1707
|
+
try:
|
|
1708
|
+
y0 = frames[0] if frames[0].ndim == 2 else frames[0][0] # use luma/first channel
|
|
1709
|
+
a = np.ascontiguousarray(y0, dtype=np.float32)
|
|
1710
|
+
b = sep.Background(a, bw=64, bh=64, fw=3, fh=3)
|
|
1711
|
+
try:
|
|
1712
|
+
rms_val = float(b.globalrms)
|
|
1713
|
+
except Exception:
|
|
1714
|
+
# some SEP builds don’t expose globalrms; fall back to the map’s median
|
|
1715
|
+
rms_val = float(np.median(np.asarray(b.rms(), dtype=np.float32)))
|
|
1716
|
+
return rms_val
|
|
1717
|
+
except Exception:
|
|
1718
|
+
return None
|
|
1719
|
+
|
|
1720
|
+
# =========================
|
|
1721
|
+
# Memory/streaming helpers
|
|
1722
|
+
# =========================
|
|
1723
|
+
|
|
1724
|
+
def _approx_bytes(arr_like_shape, dtype=np.float32):
|
|
1725
|
+
"""Rough byte estimator for a given shape/dtype."""
|
|
1726
|
+
return int(np.prod(arr_like_shape)) * np.dtype(dtype).itemsize
|
|
1727
|
+
|
|
1728
|
+
|
|
1729
|
+
|
|
1730
|
+
def _read_shape_fast(path) -> tuple[int,int,int]:
|
|
1731
|
+
if _is_xisf(path):
|
|
1732
|
+
a, _ = _load_image_array(path)
|
|
1733
|
+
if a is None:
|
|
1734
|
+
raise ValueError(f"No data in {path}")
|
|
1735
|
+
a = np.asarray(a)
|
|
1736
|
+
else:
|
|
1737
|
+
with fits.open(path, memmap=True, ignore_missing_simple=True) as hdul:
|
|
1738
|
+
a = hdul[0].data
|
|
1739
|
+
if a is None:
|
|
1740
|
+
raise ValueError(f"No data in {path}")
|
|
1741
|
+
|
|
1742
|
+
# common logic for both XISF and FITS
|
|
1743
|
+
if a.ndim == 2:
|
|
1744
|
+
H, W = a.shape
|
|
1745
|
+
return (1, int(H), int(W))
|
|
1746
|
+
if a.ndim == 3:
|
|
1747
|
+
if a.shape[-1] in (1, 3): # HWC
|
|
1748
|
+
C = int(a.shape[-1]); H = int(a.shape[0]); W = int(a.shape[1])
|
|
1749
|
+
return (1 if C == 1 else 3, H, W)
|
|
1750
|
+
if a.shape[0] in (1, 3): # CHW
|
|
1751
|
+
return (int(a.shape[0]), int(a.shape[1]), int(a.shape[2]))
|
|
1752
|
+
s = tuple(map(int, a.shape))
|
|
1753
|
+
H, W = s[-2], s[-1]
|
|
1754
|
+
return (1, H, W)
|
|
1755
|
+
|
|
1756
|
+
|
|
1757
|
+
def _read_tile_fits_any(path: str, y0: int, y1: int, x0: int, x1: int) -> np.ndarray:
|
|
1758
|
+
"""FITS/XISF-aware tile read: returns spatial tile; supports 2D, HWC, and CHW."""
|
|
1759
|
+
ext = os.path.splitext(path)[1].lower()
|
|
1760
|
+
|
|
1761
|
+
if ext == ".xisf":
|
|
1762
|
+
a, _ = _load_image_array(path) # helper returns array-like + hdr/metadata
|
|
1763
|
+
if a is None:
|
|
1764
|
+
raise ValueError(f"XISF loader returned None for {path}")
|
|
1765
|
+
a = np.asarray(a)
|
|
1766
|
+
if a.ndim == 2: # HW
|
|
1767
|
+
return np.array(a[y0:y1, x0:x1], copy=True)
|
|
1768
|
+
elif a.ndim == 3:
|
|
1769
|
+
if a.shape[-1] in (1, 3): # HWC
|
|
1770
|
+
out = a[y0:y1, x0:x1, :]
|
|
1771
|
+
if out.shape[-1] == 1:
|
|
1772
|
+
out = out[..., 0]
|
|
1773
|
+
return np.array(out, copy=True)
|
|
1774
|
+
elif a.shape[0] in (1, 3): # CHW
|
|
1775
|
+
out = a[:, y0:y1, x0:x1]
|
|
1776
|
+
if out.shape[0] == 1:
|
|
1777
|
+
out = out[0]
|
|
1778
|
+
return np.array(out, copy=True)
|
|
1779
|
+
else:
|
|
1780
|
+
raise ValueError(f"Unsupported XISF 3D shape {a.shape} in {path}")
|
|
1781
|
+
else:
|
|
1782
|
+
raise ValueError(f"Unsupported XISF ndim {a.ndim} in {path}")
|
|
1783
|
+
|
|
1784
|
+
# FITS
|
|
1785
|
+
with fits.open(path, memmap=True, ignore_missing_simple=True) as hdul:
|
|
1786
|
+
a = None
|
|
1787
|
+
for h in hdul:
|
|
1788
|
+
if getattr(h, "data", None) is not None:
|
|
1789
|
+
a = h.data
|
|
1790
|
+
break
|
|
1791
|
+
if a is None:
|
|
1792
|
+
raise ValueError(f"No image data in {path}")
|
|
1793
|
+
|
|
1794
|
+
a = np.asarray(a)
|
|
1795
|
+
|
|
1796
|
+
if a.ndim == 2: # HW
|
|
1797
|
+
return np.array(a[y0:y1, x0:x1], copy=True)
|
|
1798
|
+
|
|
1799
|
+
if a.ndim == 3:
|
|
1800
|
+
if a.shape[0] in (1, 3): # CHW (planes, rows, cols)
|
|
1801
|
+
out = a[:, y0:y1, x0:x1]
|
|
1802
|
+
if out.shape[0] == 1: out = out[0]
|
|
1803
|
+
return np.array(out, copy=True)
|
|
1804
|
+
if a.shape[-1] in (1, 3): # HWC
|
|
1805
|
+
out = a[y0:y1, x0:x1, :]
|
|
1806
|
+
if out.shape[-1] == 1: out = out[..., 0]
|
|
1807
|
+
return np.array(out, copy=True)
|
|
1808
|
+
|
|
1809
|
+
# Fallback: assume last two axes are spatial (…, H, W)
|
|
1810
|
+
try:
|
|
1811
|
+
out = a[(..., slice(y0, y1), slice(x0, x1))]
|
|
1812
|
+
return np.array(out, copy=True)
|
|
1813
|
+
except Exception:
|
|
1814
|
+
raise ValueError(f"Unsupported FITS data shape {a.shape} in {path}")
|
|
1815
|
+
|
|
1816
|
+
|
|
1817
|
+
def _seed_median_full_from_data(data_list):
|
|
1818
|
+
"""
|
|
1819
|
+
data_list: list of np.ndarray each shaped either (H,W) or (C,H,W),
|
|
1820
|
+
already cropped/sanitized to the same size by the caller.
|
|
1821
|
+
Returns: (H,W) or (C,H,W) median image in float32.
|
|
1822
|
+
"""
|
|
1823
|
+
if not data_list:
|
|
1824
|
+
raise ValueError("Empty stack for median seed")
|
|
1825
|
+
|
|
1826
|
+
a0 = data_list[0]
|
|
1827
|
+
if a0.ndim == 2:
|
|
1828
|
+
# (N, H, W) -> (H, W)
|
|
1829
|
+
cube = np.stack([np.asarray(a, dtype=np.float32, order="C") for a in data_list], axis=0)
|
|
1830
|
+
med = np.median(cube, axis=0).astype(np.float32, copy=False)
|
|
1831
|
+
return np.ascontiguousarray(med)
|
|
1832
|
+
else:
|
|
1833
|
+
# (N, C, H, W) -> (C, H, W)
|
|
1834
|
+
cube = np.stack([np.asarray(a, dtype=np.float32, order="C") for a in data_list], axis=0)
|
|
1835
|
+
med = np.median(cube, axis=0).astype(np.float32, copy=False)
|
|
1836
|
+
return np.ascontiguousarray(med)
|
|
1837
|
+
|
|
1838
|
+
|
|
1839
|
+
def _build_seed_running_mu_sigma_from_paths(paths, Ht, Wt, color_mode,
|
|
1840
|
+
*, bootstrap_frames=20, clip_sigma=5.0,
|
|
1841
|
+
status_cb=lambda s: None, progress_cb=lambda f,m='': None):
|
|
1842
|
+
K = max(1, min(int(bootstrap_frames), len(paths)))
|
|
1843
|
+
def _load_chw(i):
|
|
1844
|
+
ys, _ = _stack_loader_memmap([paths[i]], Ht, Wt, color_mode)
|
|
1845
|
+
return _as_chw(ys[0]).astype(np.float32, copy=False)
|
|
1846
|
+
x0 = _load_chw(0).copy()
|
|
1847
|
+
mean = x0; m2 = np.zeros_like(mean); count = 1
|
|
1848
|
+
for i in range(1, K):
|
|
1849
|
+
x = _load_chw(i); count += 1
|
|
1850
|
+
d = x - mean; mean += d/count; m2 += d*(x-mean)
|
|
1851
|
+
progress_cb(i/K*0.5, "μ-σ bootstrap")
|
|
1852
|
+
var = m2 / max(1, count-1); sigma = np.sqrt(np.clip(var, 1e-12, None)).astype(np.float32)
|
|
1853
|
+
lo = mean - float(clip_sigma)*sigma; hi = mean + float(clip_sigma)*sigma
|
|
1854
|
+
acc = np.zeros_like(mean); n=0
|
|
1855
|
+
for i in range(len(paths)):
|
|
1856
|
+
x = _load_chw(i); x = np.clip(x, lo, hi, out=x)
|
|
1857
|
+
acc += x; n += 1; progress_cb(0.5 + 0.5*(i+1)/len(paths), "clipped mean")
|
|
1858
|
+
seed = (acc/max(1,n)).astype(np.float32)
|
|
1859
|
+
return seed[0] if (seed.ndim==3 and seed.shape[0]==1) else seed
|
|
1860
|
+
|
|
1861
|
+
# -----------------------------
|
|
1862
|
+
# Core
|
|
1863
|
+
# -----------------------------
|
|
1864
|
+
def multiframe_deconv(
|
|
1865
|
+
paths,
|
|
1866
|
+
out_path,
|
|
1867
|
+
iters=20,
|
|
1868
|
+
kappa=2.0,
|
|
1869
|
+
color_mode="luma",
|
|
1870
|
+
seed_mode: str = "robust",
|
|
1871
|
+
huber_delta=0.0,
|
|
1872
|
+
masks=None,
|
|
1873
|
+
variances=None,
|
|
1874
|
+
rho="huber",
|
|
1875
|
+
status_cb=lambda s: None,
|
|
1876
|
+
min_iters: int = 3,
|
|
1877
|
+
use_star_masks: bool = False,
|
|
1878
|
+
use_variance_maps: bool = False,
|
|
1879
|
+
star_mask_cfg: dict | None = None,
|
|
1880
|
+
varmap_cfg: dict | None = None,
|
|
1881
|
+
save_intermediate: bool = False,
|
|
1882
|
+
save_every: int = 1,
|
|
1883
|
+
# >>> SR options
|
|
1884
|
+
super_res_factor: int = 1,
|
|
1885
|
+
sr_sigma: float = 1.1,
|
|
1886
|
+
sr_psf_opt_iters: int = 250,
|
|
1887
|
+
sr_psf_opt_lr: float = 0.1,
|
|
1888
|
+
star_mask_ref_path: str | None = None,
|
|
1889
|
+
):
|
|
1890
|
+
# sanitize and clamp
|
|
1891
|
+
max_iters = max(1, int(iters))
|
|
1892
|
+
min_iters = max(1, int(min_iters))
|
|
1893
|
+
if min_iters > max_iters:
|
|
1894
|
+
min_iters = max_iters
|
|
1895
|
+
|
|
1896
|
+
def _emit_pct(pct: float, msg: str | None = None):
|
|
1897
|
+
pct = float(max(0.0, min(1.0, pct)))
|
|
1898
|
+
status_cb(f"__PROGRESS__ {pct:.4f}" + (f" {msg}" if msg else ""))
|
|
1899
|
+
|
|
1900
|
+
status_cb(f"MFDeconv: loading {len(paths)} aligned frames…")
|
|
1901
|
+
_emit_pct(0.02, "loading")
|
|
1902
|
+
|
|
1903
|
+
# Use unified probe to pick a common crop without loading full images
|
|
1904
|
+
Ht, Wt = _common_hw_from_paths(paths)
|
|
1905
|
+
_emit_pct(0.05, "preparing")
|
|
1906
|
+
|
|
1907
|
+
# Stream actual pixels cropped to (Ht,Wt), float32 CHW/2D + headers
|
|
1908
|
+
ys_raw, hdrs = _stack_loader_memmap(paths, Ht, Wt, color_mode)
|
|
1909
|
+
relax = 0.7
|
|
1910
|
+
use_torch = False
|
|
1911
|
+
global torch, TORCH_OK
|
|
1912
|
+
|
|
1913
|
+
# -------- try to import torch from per-user runtime venv --------
|
|
1914
|
+
torch = None
|
|
1915
|
+
TORCH_OK = False
|
|
1916
|
+
cuda_ok = mps_ok = dml_ok = False
|
|
1917
|
+
dml_device = None
|
|
1918
|
+
try:
|
|
1919
|
+
from setiastro.saspro.runtime_torch import import_torch
|
|
1920
|
+
torch = import_torch(prefer_cuda=True, status_cb=status_cb)
|
|
1921
|
+
TORCH_OK = True
|
|
1922
|
+
|
|
1923
|
+
try: cuda_ok = hasattr(torch, "cuda") and torch.cuda.is_available()
|
|
1924
|
+
except Exception: cuda_ok = False
|
|
1925
|
+
try: mps_ok = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
|
1926
|
+
except Exception: mps_ok = False
|
|
1927
|
+
try:
|
|
1928
|
+
import torch_directml
|
|
1929
|
+
dml_device = torch_directml.device()
|
|
1930
|
+
_ = (torch.ones(1, device=dml_device) + 1).item()
|
|
1931
|
+
dml_ok = True
|
|
1932
|
+
except Exception:
|
|
1933
|
+
dml_ok = False
|
|
1934
|
+
|
|
1935
|
+
if cuda_ok:
|
|
1936
|
+
status_cb(f"PyTorch CUDA available: True | device={torch.cuda.get_device_name(0)}")
|
|
1937
|
+
elif mps_ok:
|
|
1938
|
+
status_cb("PyTorch MPS (Apple) available: True")
|
|
1939
|
+
elif dml_ok:
|
|
1940
|
+
status_cb("PyTorch DirectML (Windows) available: True")
|
|
1941
|
+
else:
|
|
1942
|
+
status_cb("PyTorch present, using CPU backend.")
|
|
1943
|
+
|
|
1944
|
+
status_cb(
|
|
1945
|
+
f"PyTorch {getattr(torch, '__version__', '?')} backend: "
|
|
1946
|
+
+ ("CUDA" if cuda_ok else "MPS" if mps_ok else "DirectML" if dml_ok else "CPU")
|
|
1947
|
+
)
|
|
1948
|
+
except Exception as e:
|
|
1949
|
+
TORCH_OK = False
|
|
1950
|
+
status_cb(f"PyTorch not available → CPU path. ({e})")
|
|
1951
|
+
|
|
1952
|
+
use_torch = bool(TORCH_OK)
|
|
1953
|
+
if use_torch:
|
|
1954
|
+
# ----- Precision policy (Sport mode but strict FP32) -----
|
|
1955
|
+
try:
|
|
1956
|
+
# Keep autotune for speed
|
|
1957
|
+
torch.backends.cudnn.benchmark = True
|
|
1958
|
+
|
|
1959
|
+
# Force true FP32 everywhere (no TF32 shortcuts)
|
|
1960
|
+
if hasattr(torch.backends, "cudnn"):
|
|
1961
|
+
torch.backends.cudnn.allow_tf32 = False
|
|
1962
|
+
if hasattr(torch.backends, "cuda") and hasattr(torch.backends.cuda, "matmul"):
|
|
1963
|
+
torch.backends.cuda.matmul.allow_tf32 = False
|
|
1964
|
+
if hasattr(torch, "set_float32_matmul_precision"):
|
|
1965
|
+
torch.set_float32_matmul_precision("highest")
|
|
1966
|
+
except Exception:
|
|
1967
|
+
pass
|
|
1968
|
+
|
|
1969
|
+
# (optional: telemetry)
|
|
1970
|
+
try:
|
|
1971
|
+
c_tf32 = getattr(torch.backends.cudnn, "allow_tf32", None)
|
|
1972
|
+
m_tf32 = getattr(getattr(torch.backends.cuda, "matmul", object()), "allow_tf32", None)
|
|
1973
|
+
status_cb(
|
|
1974
|
+
f"Precision: cudnn.allow_tf32={c_tf32} | "
|
|
1975
|
+
f"matmul.allow_tf32={m_tf32} | "
|
|
1976
|
+
f"benchmark={torch.backends.cudnn.benchmark}"
|
|
1977
|
+
)
|
|
1978
|
+
except Exception:
|
|
1979
|
+
pass
|
|
1980
|
+
|
|
1981
|
+
_process_gui_events_safely()
|
|
1982
|
+
|
|
1983
|
+
# PSFs (auto-size per frame) + flipped copies
|
|
1984
|
+
psf_out_dir = None
|
|
1985
|
+
psfs, masks_auto, vars_auto = _build_psf_and_assets(
|
|
1986
|
+
paths,
|
|
1987
|
+
make_masks=bool(use_star_masks),
|
|
1988
|
+
make_varmaps=bool(use_variance_maps),
|
|
1989
|
+
status_cb=status_cb,
|
|
1990
|
+
save_dir=None,
|
|
1991
|
+
star_mask_cfg=star_mask_cfg,
|
|
1992
|
+
varmap_cfg=varmap_cfg,
|
|
1993
|
+
star_mask_ref_path=star_mask_ref_path,
|
|
1994
|
+
# NEW:
|
|
1995
|
+
Ht=Ht, Wt=Wt, color_mode=color_mode,
|
|
1996
|
+
)
|
|
1997
|
+
|
|
1998
|
+
# >>> SR: lift PSFs to super-res if requested
|
|
1999
|
+
r = int(max(1, super_res_factor))
|
|
2000
|
+
if r > 1:
|
|
2001
|
+
status_cb(f"MFDeconv: Super-resolution r={r} with σ={sr_sigma} — solving SR PSFs…")
|
|
2002
|
+
_process_gui_events_safely()
|
|
2003
|
+
sr_psfs = []
|
|
2004
|
+
for i, k_native in enumerate(psfs, start=1):
|
|
2005
|
+
h = _solve_super_psf_from_native(k_native, r=r, sigma=float(sr_sigma),
|
|
2006
|
+
iters=int(sr_psf_opt_iters), lr=float(sr_psf_opt_lr))
|
|
2007
|
+
sr_psfs.append(h)
|
|
2008
|
+
status_cb(f" SR-PSF{i}: native {k_native.shape[0]} → {h.shape[0]} (sum={h.sum():.6f})")
|
|
2009
|
+
psfs = sr_psfs
|
|
2010
|
+
|
|
2011
|
+
flip_psf = [_flip_kernel(k) for k in psfs]
|
|
2012
|
+
_emit_pct(0.20, "PSF Ready")
|
|
2013
|
+
|
|
2014
|
+
# Normalize layout BEFORE size harmonization
|
|
2015
|
+
data = _normalize_layout_batch(ys_raw, color_mode) # list of (H,W) or (3,H,W)
|
|
2016
|
+
if str(color_mode).lower() != "luma":
|
|
2017
|
+
# Force strict CHW for every frame
|
|
2018
|
+
data = [_as_chw(a) for a in data]
|
|
2019
|
+
Cs = {a.shape[0] for a in data}
|
|
2020
|
+
if len(Cs) != 1:
|
|
2021
|
+
raise ValueError(f"Inconsistent channel counts in PerChannel mode: {Cs}")
|
|
2022
|
+
_emit_pct(0.25, "Calculating Seed Image...")
|
|
2023
|
+
|
|
2024
|
+
# Center-crop all to common intersection
|
|
2025
|
+
Ht, Wt = _common_hw(data)
|
|
2026
|
+
if any(((a.shape[-2] != Ht) or (a.shape[-1] != Wt)) for a in data):
|
|
2027
|
+
status_cb(f"MFDeconv: Standardizing shapes → crop to {Ht}×{Wt}")
|
|
2028
|
+
data = [_center_crop(a, Ht, Wt) for a in data]
|
|
2029
|
+
|
|
2030
|
+
# Numeric hygiene
|
|
2031
|
+
data = [_sanitize_numeric(a) for a in data]
|
|
2032
|
+
|
|
2033
|
+
# --- SR/native seed ---
|
|
2034
|
+
# --- Seed (choose robust μ-σ or median) ---
|
|
2035
|
+
seed_mode_s = str(seed_mode).lower().strip()
|
|
2036
|
+
if seed_mode_s not in ("robust","median"):
|
|
2037
|
+
seed_mode_s = "robust"
|
|
2038
|
+
|
|
2039
|
+
if seed_mode_s == "median":
|
|
2040
|
+
status_cb("MFDeconv: Building median seed (in-memory)…")
|
|
2041
|
+
# Use already normalized, cropped, sanitized frames
|
|
2042
|
+
seed_native = _seed_median_full_from_data(data)
|
|
2043
|
+
else:
|
|
2044
|
+
status_cb("MFDeconv: Building robust seed (live μ-σ stacking)…")
|
|
2045
|
+
seed_native = _build_seed_running_mu_sigma_from_paths(
|
|
2046
|
+
paths, Ht, Wt, color_mode,
|
|
2047
|
+
bootstrap_frames=20, clip_sigma=5.0,
|
|
2048
|
+
status_cb=status_cb, progress_cb=lambda f,m='': None
|
|
2049
|
+
)
|
|
2050
|
+
if r > 1:
|
|
2051
|
+
if seed_native.ndim == 2:
|
|
2052
|
+
x = _upsample_sum(seed_native / (r*r), r, target_hw=(Ht*r, Wt*r))
|
|
2053
|
+
else:
|
|
2054
|
+
C, Hn, Wn = seed_native.shape
|
|
2055
|
+
x = np.stack(
|
|
2056
|
+
[_upsample_sum(seed_native[c] / (r*r), r, target_hw=(Hn*r, Wn*r)) for c in range(C)],
|
|
2057
|
+
axis=0
|
|
2058
|
+
)
|
|
2059
|
+
else:
|
|
2060
|
+
x = seed_native
|
|
2061
|
+
Hs, Ws = x.shape if x.ndim == 2 else x.shape[-2:]
|
|
2062
|
+
|
|
2063
|
+
# masks/vars aligned to native grid (2D each)
|
|
2064
|
+
auto_masks = masks_auto if use_star_masks else None
|
|
2065
|
+
auto_vars = vars_auto if use_variance_maps else None
|
|
2066
|
+
mask_list = _ensure_mask_list(masks if masks is not None else auto_masks, data)
|
|
2067
|
+
var_list = _ensure_var_list(variances if variances is not None else auto_vars, data)
|
|
2068
|
+
|
|
2069
|
+
iter_dir = None
|
|
2070
|
+
hdr0_seed = None
|
|
2071
|
+
if save_intermediate:
|
|
2072
|
+
iter_dir = _iter_folder(out_path)
|
|
2073
|
+
status_cb(f"MFDeconv: Intermediate outputs → {iter_dir}")
|
|
2074
|
+
try:
|
|
2075
|
+
hdr0_seed = _safe_primary_header(paths[0])
|
|
2076
|
+
except Exception:
|
|
2077
|
+
hdr0_seed = fits.Header()
|
|
2078
|
+
_save_iter_image(x, hdr0_seed, iter_dir, "seed", color_mode)
|
|
2079
|
+
|
|
2080
|
+
status_cb("MFDeconv: Calculating Backgrounds and MADs…")
|
|
2081
|
+
_process_gui_events_safely()
|
|
2082
|
+
bg_est = _sep_bg_rms(data) or (np.median([np.median(np.abs(y - np.median(y))) for y in (data if isinstance(data, list) else [data])]) * 1.4826)
|
|
2083
|
+
status_cb(f"MFDeconv: color_mode={color_mode}, huber_delta={huber_delta} (bg RMS~{bg_est:.3g})")
|
|
2084
|
+
_process_gui_events_safely()
|
|
2085
|
+
|
|
2086
|
+
status_cb("Computing FFTs and Allocating Scratch…")
|
|
2087
|
+
_process_gui_events_safely()
|
|
2088
|
+
|
|
2089
|
+
# -------- precompute and allocate scratch --------
|
|
2090
|
+
per_frame_logging = (r > 1)
|
|
2091
|
+
if use_torch:
|
|
2092
|
+
x_t = _to_t(_contig(x))
|
|
2093
|
+
num = torch.zeros_like(x_t)
|
|
2094
|
+
den = torch.zeros_like(x_t)
|
|
2095
|
+
|
|
2096
|
+
if r > 1:
|
|
2097
|
+
# >>> SR path now uses SPATIAL CONV (cuDNN) to avoid huge FFT buffers
|
|
2098
|
+
psf_t = [_to_t(_contig(k))[None, None] for k in psfs] # (1,1,kh,kw)
|
|
2099
|
+
psfT_t = [_to_t(_contig(kT))[None, None] for kT in flip_psf]
|
|
2100
|
+
else:
|
|
2101
|
+
# Native spatial (as before)
|
|
2102
|
+
psf_t = [_to_t(_contig(k))[None, None] for k in psfs]
|
|
2103
|
+
psfT_t = [_to_t(_contig(kT))[None, None] for kT in flip_psf]
|
|
2104
|
+
|
|
2105
|
+
else:
|
|
2106
|
+
x_t = x
|
|
2107
|
+
# CPU path: keep your (more RAM-tolerant) FFT packs
|
|
2108
|
+
if r > 1:
|
|
2109
|
+
if x_t.ndim == 2:
|
|
2110
|
+
Hs, Ws = x_t.shape
|
|
2111
|
+
else:
|
|
2112
|
+
_, Hs, Ws = x_t.shape
|
|
2113
|
+
Kfs, KTfs, meta = _precompute_np_psf_ffts(psfs, flip_psf, Hs, Ws)
|
|
2114
|
+
pred_super = np.empty_like(x_t)
|
|
2115
|
+
tmp_out = np.empty_like(x_t)
|
|
2116
|
+
else:
|
|
2117
|
+
Kfs, KTfs, meta = _precompute_np_psf_ffts(psfs, flip_psf, Hs, Ws)
|
|
2118
|
+
pred_super = np.empty_like(x_t)
|
|
2119
|
+
tmp_out = np.empty_like(x_t)
|
|
2120
|
+
num = np.zeros_like(x_t)
|
|
2121
|
+
den = np.zeros_like(x_t)
|
|
2122
|
+
|
|
2123
|
+
status_cb("Starting First Multiplicative Iteration…")
|
|
2124
|
+
_process_gui_events_safely()
|
|
2125
|
+
|
|
2126
|
+
cm = _safe_inference_context() if use_torch else NO_GRAD
|
|
2127
|
+
rho_is_l2 = (str(rho).lower() == "l2")
|
|
2128
|
+
local_delta = 0.0 if rho_is_l2 else huber_delta
|
|
2129
|
+
used_iters = 0
|
|
2130
|
+
early_stopped = False
|
|
2131
|
+
|
|
2132
|
+
auto_delta_cache = None
|
|
2133
|
+
if use_torch and (huber_delta < 0) and (not rho_is_l2):
|
|
2134
|
+
auto_delta_cache = [None] * len(paths)
|
|
2135
|
+
|
|
2136
|
+
early = EarlyStopper(
|
|
2137
|
+
tol_upd_floor=2e-4, # match new version
|
|
2138
|
+
tol_rel_floor=5e-4,
|
|
2139
|
+
early_frac=0.40,
|
|
2140
|
+
ema_alpha=0.5,
|
|
2141
|
+
patience=2,
|
|
2142
|
+
min_iters=min_iters,
|
|
2143
|
+
status_cb=status_cb
|
|
2144
|
+
)
|
|
2145
|
+
x_ndim = 2 if (np.ndim(x) == 2) else 3
|
|
2146
|
+
fixed = 0
|
|
2147
|
+
for i, a in enumerate(data):
|
|
2148
|
+
if a.ndim != x_ndim:
|
|
2149
|
+
# fix common mono cases only
|
|
2150
|
+
if x_ndim == 2 and a.ndim == 3 and a.shape[0] == 1:
|
|
2151
|
+
data[i] = a[0]; fixed += 1
|
|
2152
|
+
elif x_ndim == 2 and a.ndim == 3 and a.shape[-1] == 1:
|
|
2153
|
+
data[i] = a[..., 0]; fixed += 1
|
|
2154
|
+
|
|
2155
|
+
with cm():
|
|
2156
|
+
for it in range(1, max_iters + 1):
|
|
2157
|
+
if use_torch:
|
|
2158
|
+
num.zero_(); den.zero_()
|
|
2159
|
+
|
|
2160
|
+
if r > 1:
|
|
2161
|
+
# -------- SR path (SPATIAL conv + stream) --------
|
|
2162
|
+
for fidx, (wk, wkT) in enumerate(zip(psf_t, psfT_t)):
|
|
2163
|
+
yt_np = data[fidx] # CHW or HW (CPU)
|
|
2164
|
+
mt_np = mask_list[fidx]
|
|
2165
|
+
vt_np = var_list[fidx]
|
|
2166
|
+
|
|
2167
|
+
yt = torch.as_tensor(yt_np, dtype=x_t.dtype, device=x_t.device)
|
|
2168
|
+
mt = None if mt_np is None else torch.as_tensor(mt_np, dtype=x_t.dtype, device=x_t.device)
|
|
2169
|
+
vt = None if vt_np is None else torch.as_tensor(vt_np, dtype=x_t.dtype, device=x_t.device)
|
|
2170
|
+
|
|
2171
|
+
# SR conv on grid of x_t
|
|
2172
|
+
pred_super = _conv_same_torch(x_t, wk) # SR grid
|
|
2173
|
+
pred_low = _downsample_avg_t(pred_super, r) # native grid
|
|
2174
|
+
|
|
2175
|
+
if auto_delta_cache is not None:
|
|
2176
|
+
if (auto_delta_cache[fidx] is None) or (it % 5 == 1):
|
|
2177
|
+
rnat = yt - pred_low
|
|
2178
|
+
med = torch.median(rnat)
|
|
2179
|
+
mad = torch.median(torch.abs(rnat - med)) + 1e-6
|
|
2180
|
+
rms = 1.4826 * mad
|
|
2181
|
+
auto_delta_cache[fidx] = float((-huber_delta) * torch.clamp(rms, min=1e-6).item())
|
|
2182
|
+
wmap_low = _weight_map(yt, pred_low, auto_delta_cache[fidx], var_map=vt, mask=mt)
|
|
2183
|
+
else:
|
|
2184
|
+
wmap_low = _weight_map(yt, pred_low, local_delta, var_map=vt, mask=mt)
|
|
2185
|
+
|
|
2186
|
+
# lift back to SR via sum-replicate
|
|
2187
|
+
up_y = _upsample_sum_t(wmap_low * yt, r)
|
|
2188
|
+
up_pred = _upsample_sum_t(wmap_low * pred_low, r)
|
|
2189
|
+
|
|
2190
|
+
# accumulate via adjoint kernel on SR grid
|
|
2191
|
+
num += _conv_same_torch(up_y, wkT)
|
|
2192
|
+
den += _conv_same_torch(up_pred, wkT)
|
|
2193
|
+
|
|
2194
|
+
# free temps as aggressively as possible
|
|
2195
|
+
del yt, mt, vt, pred_super, pred_low, wmap_low, up_y, up_pred
|
|
2196
|
+
if cuda_ok:
|
|
2197
|
+
try: torch.cuda.empty_cache()
|
|
2198
|
+
except Exception as e:
|
|
2199
|
+
import logging
|
|
2200
|
+
logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
|
|
2201
|
+
|
|
2202
|
+
if per_frame_logging and ((fidx & 7) == 0):
|
|
2203
|
+
status_cb(f"Iter {it}/{max_iters} — frame {fidx+1}/{len(paths)} (SR spatial)")
|
|
2204
|
+
|
|
2205
|
+
else:
|
|
2206
|
+
# -------- Native path (spatial conv, stream) --------
|
|
2207
|
+
for fidx, (wk, wkT) in enumerate(zip(psf_t, psfT_t)):
|
|
2208
|
+
yt_np = data[fidx]
|
|
2209
|
+
mt_np = mask_list[fidx]
|
|
2210
|
+
vt_np = var_list[fidx]
|
|
2211
|
+
|
|
2212
|
+
yt = torch.as_tensor(yt_np, dtype=x_t.dtype, device=x_t.device)
|
|
2213
|
+
mt = None if mt_np is None else torch.as_tensor(mt_np, dtype=x_t.dtype, device=x_t.device)
|
|
2214
|
+
vt = None if vt_np is None else torch.as_tensor(vt_np, dtype=x_t.dtype, device=x_t.device)
|
|
2215
|
+
|
|
2216
|
+
pred = _conv_same_torch(x_t, wk)
|
|
2217
|
+
wmap = _weight_map(yt, pred, local_delta, var_map=vt, mask=mt)
|
|
2218
|
+
up_y = wmap * yt
|
|
2219
|
+
up_pred = wmap * pred
|
|
2220
|
+
num += _conv_same_torch(up_y, wkT)
|
|
2221
|
+
den += _conv_same_torch(up_pred, wkT)
|
|
2222
|
+
|
|
2223
|
+
del yt, mt, vt, pred, wmap, up_y, up_pred
|
|
2224
|
+
if cuda_ok:
|
|
2225
|
+
try: torch.cuda.empty_cache()
|
|
2226
|
+
except Exception as e:
|
|
2227
|
+
import logging
|
|
2228
|
+
logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
|
|
2229
|
+
|
|
2230
|
+
ratio = num / (den + EPS)
|
|
2231
|
+
neutral = (den.abs() < 1e-12) & (num.abs() < 1e-12)
|
|
2232
|
+
ratio = torch.where(neutral, torch.ones_like(ratio), ratio)
|
|
2233
|
+
upd = torch.clamp(ratio, 1.0 / kappa, kappa)
|
|
2234
|
+
x_next = torch.clamp(x_t * upd, min=0.0)
|
|
2235
|
+
|
|
2236
|
+
upd_med = torch.median(torch.abs(upd - 1))
|
|
2237
|
+
rel_change = (torch.median(torch.abs(x_next - x_t)) /
|
|
2238
|
+
(torch.median(torch.abs(x_t)) + 1e-8))
|
|
2239
|
+
|
|
2240
|
+
# candidates for convergence
|
|
2241
|
+
try:
|
|
2242
|
+
um = float(upd_med.detach().cpu().item())
|
|
2243
|
+
except Exception:
|
|
2244
|
+
um = float(upd_med)
|
|
2245
|
+
|
|
2246
|
+
try:
|
|
2247
|
+
rc = float(rel_change.detach().cpu().item())
|
|
2248
|
+
except Exception:
|
|
2249
|
+
rc = float(rel_change)
|
|
2250
|
+
|
|
2251
|
+
if early.step(it, max_iters, um, rc):
|
|
2252
|
+
x_t = x_next
|
|
2253
|
+
used_iters = it
|
|
2254
|
+
early_stopped = True
|
|
2255
|
+
_process_gui_events_safely()
|
|
2256
|
+
break
|
|
2257
|
+
|
|
2258
|
+
x_t = (1.0 - relax) * x_t + relax * x_next
|
|
2259
|
+
+        else:
+            # -------- NumPy path (unchanged) --------
+            num.fill(0.0); den.fill(0.0)
+            if r > 1:
+                for (Kf, KTf, (kh, kw, fftH, fftW)), m2d, v2d, y_nat in zip(
+                        zip(Kfs, KTfs, meta), mask_list, var_list, data):
+                    _fft_conv_same_np(x_t, Kf, kh, kw, fftH, fftW, pred_super)
+                    pred_low = _downsample_avg(pred_super, r)
+                    wmap_low = _weight_map(y_nat, pred_low, local_delta, var_map=v2d, mask=m2d)
+                    up_y = _upsample_sum(wmap_low * y_nat, r, target_hw=pred_super.shape[-2:])
+                    up_pred = _upsample_sum(wmap_low * pred_low, r, target_hw=pred_super.shape[-2:])
+                    _fft_conv_same_np(up_y, KTf, kh, kw, fftH, fftW, tmp_out); num += tmp_out
+                    _fft_conv_same_np(up_pred, KTf, kh, kw, fftH, fftW, tmp_out); den += tmp_out
+            else:
+                for (Kf, KTf, (kh, kw, fftH, fftW)), m2d, v2d, y_nat in zip(
+                        zip(Kfs, KTfs, meta), mask_list, var_list, data):
+                    _fft_conv_same_np(x_t, Kf, kh, kw, fftH, fftW, pred_super)
+                    pred = pred_super
+                    wmap = _weight_map(y_nat, pred, local_delta, var_map=v2d, mask=m2d)
+                    up_y, up_pred = (wmap * y_nat), (wmap * pred)
+                    _fft_conv_same_np(up_y, KTf, kh, kw, fftH, fftW, tmp_out); num += tmp_out
+                    _fft_conv_same_np(up_pred, KTf, kh, kw, fftH, fftW, tmp_out); den += tmp_out
+
+            ratio = num / (den + EPS)
+            neutral = (np.abs(den) < 1e-12) & (np.abs(num) < 1e-12)
+            ratio[neutral] = 1.0
+            upd = np.clip(ratio, 1.0 / kappa, kappa)
+            x_next = np.clip(x_t * upd, 0.0, None)
+
+            upd_med = np.median(np.abs(upd - 1.0))
+            rel_change = (np.median(np.abs(x_next - x_t)) /
+                          (np.median(np.abs(x_t)) + 1e-8))
+            um = float(upd_med)
+            rc = float(rel_change)
+
+            if early.step(it, max_iters, um, rc):
+                x_t = x_next
+                used_iters = it
+                early_stopped = True
+                _process_gui_events_safely()
+                break
+
+
+            x_t = (1.0 - relax) * x_t + relax * x_next
+
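
Both super-resolution branches above push each frame's prediction through an average-bin downsample (_downsample_avg) before weighting, and push the weighted images back onto the SR grid with a sum-style upsample (_upsample_sum / _upsample_sum_t). The sketch below only illustrates one self-consistent downsample/adjoint-upsample pair under the assumption of plain r x r average binning; the package's helpers may handle edges, odd sizes (target_hw) and torch tensors differently, so treat the names, scaling and behavior here as assumptions.

# Illustrative sketch, not the package's implementation.
import numpy as np

def downsample_avg(img: np.ndarray, r: int) -> np.ndarray:
    """Average r x r blocks (assumes H and W are divisible by r)."""
    H, W = img.shape
    return img.reshape(H // r, r, W // r, r).mean(axis=(1, 3))

def upsample_sum(img_low: np.ndarray, r: int) -> np.ndarray:
    """Adjoint of the average-bin operator above: replicate each pixel into an
    r x r block and scale by 1/r^2 so that <D x, y> == <x, D^T y>."""
    return np.repeat(np.repeat(img_low, r, axis=0), r, axis=1) / (r * r)

# Quick adjointness check on random data
rng = np.random.default_rng(0)
x = rng.normal(size=(8, 8)); y = rng.normal(size=(4, 4))
assert np.allclose(np.sum(downsample_avg(x, 2) * y), np.sum(x * upsample_sum(y, 2)))

Keeping the pair adjoint (whatever the exact scaling convention) is what keeps the numerator and denominator accumulations on the SR grid comparable, so the ratio update stays well scaled.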
+        # save intermediate
+        if save_intermediate and (it % int(max(1, save_every)) == 0):
+            try:
+                x_np = x_t.detach().cpu().numpy().astype(np.float32) if use_torch else x_t.astype(np.float32)
+                _save_iter_image(x_np, hdr0_seed, iter_dir, f"iter_{it:03d}", color_mode)
+            except Exception as _e:
+                status_cb(f"Intermediate save failed at iter {it}: {_e}")
+
+        frac = 0.25 + 0.70 * (it / float(max_iters))
+        _emit_pct(frac, f"Iteration {it}/{max_iters}")
+
+        _process_gui_events_safely()
+
+    if not early_stopped:
+        used_iters = max_iters
+
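
After the per-frame accumulations, both code paths above form the same clamped multiplicative update and the same two convergence statistics (median of |upd - 1| and median relative change of the estimate) that are handed to early.step(...). The sketch below restates that update in minimal NumPy form; the should_stop rule is a hypothetical stand-in, since the actual early-stop object is defined elsewhere in the module, and the tolerances are assumptions.

# Minimal sketch of the clamped multiplicative update and its convergence metrics.
import numpy as np

def clamped_update(x, num, den, kappa=1.5, eps=1e-12):
    ratio = num / (den + eps)
    ratio[(np.abs(den) < 1e-12) & (np.abs(num) < 1e-12)] = 1.0   # neutral pixels
    upd = np.clip(ratio, 1.0 / kappa, kappa)                     # limit per-iteration change
    x_next = np.clip(x * upd, 0.0, None)                         # enforce non-negativity
    um = float(np.median(np.abs(upd - 1.0)))                     # median update magnitude
    rc = float(np.median(np.abs(x_next - x)) /
               (np.median(np.abs(x)) + 1e-8))                    # median relative change
    return x_next, um, rc

def should_stop(it, min_iters, um, rc, tol_um=1e-3, tol_rc=1e-3):
    # hypothetical rule: stop once both statistics fall below tolerance
    return it >= min_iters and um < tol_um and rc < tol_rc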
+    # ----------------------------
+    # Save result (keep FITS-friendly order: (C,H,W))
+    # ----------------------------
+    _emit_pct(0.97, "saving")
+    x_final = x_t.detach().cpu().numpy().astype(np.float32) if use_torch else x_t.astype(np.float32)
+
+    if x_final.ndim == 3:
+        if x_final.shape[0] not in (1, 3) and x_final.shape[-1] in (1, 3):
+            x_final = np.moveaxis(x_final, -1, 0)
+        if x_final.shape[0] == 1:
+            x_final = x_final[0]
+
+    try:
+        hdr0 = _safe_primary_header(paths[0])
+    except Exception:
+        hdr0 = fits.Header()
+    hdr0['MFDECONV'] = (True, 'Seti Astro multi-frame deconvolution')
+    hdr0['MF_COLOR'] = (str(color_mode), 'Color mode used')
+    hdr0['MF_RHO'] = (str(rho), 'Loss: huber|l2')
+    hdr0['MF_HDEL'] = (float(huber_delta), 'Huber delta (>0 abs, <0 autoxRMS)')
+    hdr0['MF_MASK'] = (bool(use_star_masks), 'Used auto star masks')
+    hdr0['MF_VAR'] = (bool(use_variance_maps), 'Used auto variance maps')
+
+    hdr0['MF_SR'] = (int(r), 'Super-resolution factor (1 := native)')
+    if r > 1:
+        hdr0['MF_SRSIG'] = (float(sr_sigma), 'Gaussian sigma for SR PSF fit (pixels at native)')
+        hdr0['MF_SRIT'] = (int(sr_psf_opt_iters), 'SR-PSF solver iters')
+
+    hdr0['MF_ITMAX'] = (int(max_iters), 'Requested max iterations')
+    hdr0['MF_ITERS'] = (int(used_iters), 'Actual iterations run')
+    hdr0['MF_ESTOP'] = (bool(early_stopped), 'Early stop triggered')
+
+    if isinstance(x_final, np.ndarray):
+        if x_final.ndim == 2:
+            hdr0['MF_SHAPE'] = (f"{x_final.shape[0]}x{x_final.shape[1]}", 'Saved as 2D image (HxW)')
+        elif x_final.ndim == 3:
+            C, H, W = x_final.shape
+            hdr0['MF_SHAPE'] = (f"{C}x{H}x{W}", 'Saved as 3D cube (CxHxW)')
+
+    save_path = _sr_out_path(out_path, super_res_factor)
+    safe_out_path = _nonclobber_path(str(save_path))
+    if safe_out_path != str(save_path):
+        status_cb(f"Output exists — saving as: {safe_out_path}")
+    fits.PrimaryHDU(data=x_final, header=hdr0).writeto(safe_out_path, overwrite=False)
+
+    status_cb(f"✅ MFDeconv saved: {safe_out_path} (iters used: {used_iters}{', early stop' if early_stopped else ''})")
+    _emit_pct(1.00, "done")
+    _process_gui_events_safely()
+
+    try:
+        if use_torch:
+            try: del num, den
+            except Exception as e:
+                import logging
+                logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
+            try: del psf_t, psfT_t
+            except Exception as e:
+                import logging
+                logging.debug(f"Exception suppressed: {type(e).__name__}: {e}")
+            _free_torch_memory()
+    except Exception:
+        pass
+
+    return safe_out_path
+
+
+
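
The MF_* provenance cards written above make the saved FITS self-describing, so a result can be audited after the fact. A minimal read-back with standard astropy, using a placeholder file name:

from astropy.io import fits

hdr = fits.getheader("mfdeconv_result.fits")
print(hdr.get("MF_ITERS"), "iterations,",
      "early stop" if hdr.get("MF_ESTOP") else "ran to MF_ITMAX")
print("SR factor:", hdr.get("MF_SR"), "| loss:", hdr.get("MF_RHO"))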
+# -----------------------------
+# Worker
+# -----------------------------
+
+class MultiFrameDeconvWorkerSport(QObject):
+    progress = pyqtSignal(str)
+    finished = pyqtSignal(bool, str, str)  # success, message, out_path
+
+    def __init__(self, parent, aligned_paths, output_path, iters, kappa, color_mode,
+                 huber_delta, min_iters, use_star_masks=False, use_variance_maps=False, rho="huber",
+                 star_mask_cfg: dict | None = None, varmap_cfg: dict | None = None,
+                 save_intermediate: bool = False,
+                 seed_mode: str = "robust",
+                 # NEW SR params
+                 super_res_factor: int = 1,
+                 sr_sigma: float = 1.1,
+                 sr_psf_opt_iters: int = 250,
+                 sr_psf_opt_lr: float = 0.1,
+                 star_mask_ref_path: str | None = None):
+        super().__init__(parent)
+        self.aligned_paths = aligned_paths
+        self.output_path = output_path
+        self.iters = iters
+        self.kappa = kappa
+        self.color_mode = color_mode
+        self.huber_delta = huber_delta
+        self.min_iters = min_iters  # NEW
+        self.star_mask_cfg = star_mask_cfg or {}
+        self.varmap_cfg = varmap_cfg or {}
+        self.use_star_masks = use_star_masks
+        self.use_variance_maps = use_variance_maps
+        self.rho = rho
+        self.save_intermediate = save_intermediate
+        self.super_res_factor = int(super_res_factor)
+        self.sr_sigma = float(sr_sigma)
+        self.sr_psf_opt_iters = int(sr_psf_opt_iters)
+        self.sr_psf_opt_lr = float(sr_psf_opt_lr)
+        self.star_mask_ref_path = star_mask_ref_path
+        self.seed_mode = seed_mode
+
+
+    def _log(self, s): self.progress.emit(s)
+
+    def run(self):
+        try:
+            out = multiframe_deconv(
+                self.aligned_paths,
+                self.output_path,
+                iters=self.iters,
+                kappa=self.kappa,
+                color_mode=self.color_mode,
+                seed_mode=self.seed_mode,
+                huber_delta=self.huber_delta,
+                use_star_masks=self.use_star_masks,
+                use_variance_maps=self.use_variance_maps,
+                rho=self.rho,
+                min_iters=self.min_iters,
+                status_cb=self._log,
+                star_mask_cfg=self.star_mask_cfg,
+                varmap_cfg=self.varmap_cfg,
+                save_intermediate=self.save_intermediate,
+                # NEW SR forwards
+                super_res_factor=self.super_res_factor,
+                sr_sigma=self.sr_sigma,
+                sr_psf_opt_iters=self.sr_psf_opt_iters,
+                sr_psf_opt_lr=self.sr_psf_opt_lr,
+                star_mask_ref_path=self.star_mask_ref_path,
+            )
+            self.finished.emit(True, "MF deconvolution complete.", out)
+            _process_gui_events_safely()
+        except Exception as e:
+            self.finished.emit(False, f"MF deconvolution failed: {e}", "")