spikezoo 0.1.2__tar.gz → 0.2__tar.gz
Sign up to get free protection for your applications and to get access to all the features.
- spikezoo-0.2/MANIFEST.in +5 -0
- spikezoo-0.2/PKG-INFO +163 -0
- spikezoo-0.2/README.md +126 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/setup.py +4 -4
- spikezoo-0.2/spikezoo/__init__.py +13 -0
- spikezoo-0.2/spikezoo/archs/__pycache__/__init__.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/base/__pycache__/nets.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/base/nets.py +34 -0
- spikezoo-0.2/spikezoo/archs/bsf/README.md +92 -0
- spikezoo-0.2/spikezoo/archs/bsf/datasets/datasets.py +328 -0
- spikezoo-0.2/spikezoo/archs/bsf/datasets/ds_utils.py +64 -0
- spikezoo-0.2/spikezoo/archs/bsf/main.py +398 -0
- spikezoo-0.2/spikezoo/archs/bsf/metrics/psnr.py +22 -0
- spikezoo-0.2/spikezoo/archs/bsf/metrics/ssim.py +54 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/__pycache__/align.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/__pycache__/bsf.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/__pycache__/rep.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/align.py +154 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/bsf.py +105 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/dsft_convert.py +96 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/bsf/rep.py +44 -0
- spikezoo-0.2/spikezoo/archs/bsf/models/get_model.py +7 -0
- spikezoo-0.2/spikezoo/archs/bsf/prepare_data/DSFT.py +62 -0
- spikezoo-0.2/spikezoo/archs/bsf/prepare_data/crop_dataset_train.py +135 -0
- spikezoo-0.2/spikezoo/archs/bsf/prepare_data/crop_dataset_val.py +139 -0
- spikezoo-0.2/spikezoo/archs/bsf/prepare_data/crop_train.sh +4 -0
- spikezoo-0.2/spikezoo/archs/bsf/prepare_data/crop_val.sh +4 -0
- spikezoo-0.2/spikezoo/archs/bsf/prepare_data/io_utils.py +64 -0
- spikezoo-0.2/spikezoo/archs/bsf/requirements.txt +9 -0
- spikezoo-0.2/spikezoo/archs/bsf/test.py +16 -0
- spikezoo-0.2/spikezoo/archs/bsf/utils.py +154 -0
- spikezoo-0.2/spikezoo/archs/spikeclip/__pycache__/nets.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/spikeclip/nets.py +40 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/CheckPoints/readme +1 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/DataProcess/DataExtactor.py +60 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/DataProcess/DataLoader.py +115 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/DataProcess/LoadSpike.py +39 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/EvalResults/readme +1 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/LICENSE +21 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/Metrics/Metrics.py +50 -0
- {spikezoo-0.1.2/spikezoo/archs → spikezoo-0.2/spikezoo/archs/spikeformer/Metrics}/__init__.py +0 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/Model/Loss.py +89 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/Model/SpikeFormer.py +230 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/Model/__init__.py +0 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/Model/__pycache__/SpikeFormer.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/Model/__pycache__/__init__.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/README.md +30 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/evaluate.py +87 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/recon_real_data.py +97 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/requirements.yml +95 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/train.py +173 -0
- spikezoo-0.2/spikezoo/archs/spikeformer/utils.py +22 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/.github/workflows/pylint.yml +23 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/.gitignore +150 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/DCNv2.py +135 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/__pycache__/DCNv2.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/__pycache__/align_arch.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/__pycache__/nets.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/align_arch.py +159 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/dataset.py +144 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/nets.py +230 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/readme.md +86 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/test_gen_imgseq.py +118 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/train.py +189 -0
- spikezoo-0.2/spikezoo/archs/spk2imgnet/utils.py +64 -0
- spikezoo-0.2/spikezoo/archs/ssir/README.md +87 -0
- spikezoo-0.2/spikezoo/archs/ssir/configs/SSIR.yml +37 -0
- spikezoo-0.2/spikezoo/archs/ssir/configs/yml_parser.py +78 -0
- spikezoo-0.2/spikezoo/archs/ssir/datasets/dataset_sreds.py +170 -0
- spikezoo-0.2/spikezoo/archs/ssir/datasets/ds_utils.py +66 -0
- spikezoo-0.2/spikezoo/archs/ssir/losses.py +21 -0
- spikezoo-0.2/spikezoo/archs/ssir/main.py +326 -0
- spikezoo-0.2/spikezoo/archs/ssir/metrics/psnr.py +22 -0
- spikezoo-0.2/spikezoo/archs/ssir/metrics/ssim.py +54 -0
- spikezoo-0.2/spikezoo/archs/ssir/models/Vgg19.py +42 -0
- spikezoo-0.2/spikezoo/archs/ssir/models/__pycache__/layers.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/ssir/models/__pycache__/networks.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/ssir/models/layers.py +110 -0
- spikezoo-0.2/spikezoo/archs/ssir/models/networks.py +61 -0
- spikezoo-0.2/spikezoo/archs/ssir/requirements.txt +8 -0
- spikezoo-0.2/spikezoo/archs/ssir/shells/eval_SREDS.sh +6 -0
- spikezoo-0.2/spikezoo/archs/ssir/shells/train_SSIR.sh +12 -0
- spikezoo-0.2/spikezoo/archs/ssir/test.py +3 -0
- spikezoo-0.2/spikezoo/archs/ssir/utils.py +154 -0
- spikezoo-0.2/spikezoo/archs/ssml/__pycache__/cbam.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/ssml/__pycache__/model.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/ssml/cbam.py +224 -0
- spikezoo-0.2/spikezoo/archs/ssml/model.py +290 -0
- spikezoo-0.2/spikezoo/archs/ssml/res.png +0 -0
- spikezoo-0.2/spikezoo/archs/ssml/test.py +67 -0
- spikezoo-0.2/spikezoo/archs/stir/.git-credentials +0 -0
- spikezoo-0.2/spikezoo/archs/stir/README.md +65 -0
- spikezoo-0.2/spikezoo/archs/stir/ckpt_outputs/Descriptions.txt +1 -0
- spikezoo-0.2/spikezoo/archs/stir/configs/STIR.yml +37 -0
- spikezoo-0.2/spikezoo/archs/stir/configs/utils.py +155 -0
- spikezoo-0.2/spikezoo/archs/stir/configs/yml_parser.py +78 -0
- spikezoo-0.2/spikezoo/archs/stir/datasets/dataset_sreds.py +180 -0
- spikezoo-0.2/spikezoo/archs/stir/datasets/ds_utils.py +66 -0
- spikezoo-0.2/spikezoo/archs/stir/eval_SREDS.sh +5 -0
- spikezoo-0.2/spikezoo/archs/stir/main.py +397 -0
- spikezoo-0.2/spikezoo/archs/stir/metrics/losses.py +219 -0
- spikezoo-0.2/spikezoo/archs/stir/metrics/psnr.py +22 -0
- spikezoo-0.2/spikezoo/archs/stir/metrics/ssim.py +54 -0
- spikezoo-0.2/spikezoo/archs/stir/models/Vgg19.py +42 -0
- spikezoo-0.2/spikezoo/archs/stir/models/__pycache__/networks_STIR.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/stir/models/__pycache__/submodules.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/stir/models/__pycache__/transformer_new.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/stir/models/networks_STIR.py +361 -0
- spikezoo-0.2/spikezoo/archs/stir/models/submodules.py +86 -0
- spikezoo-0.2/spikezoo/archs/stir/models/transformer_new.py +151 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/__init__.py +0 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/convertions.py +721 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/disp_netS.py +133 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/flow_utils.py +167 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/generic_train_test.py +76 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/geometry.py +458 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/image_proc.py +183 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/linalg.py +40 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/losses.py +198 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/metrics.py +51 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/model_base.py +53 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/net_basics.py +100 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/resnet.py +333 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/transforms.py +123 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/build/lib/package_core/utils.py +72 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/dist/package_core-0.0.0-py3.9.egg +0 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/__init__.py +0 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/__pycache__/__init__.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/__pycache__/net_basics.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/convertions.py +721 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/disp_netS.py +133 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/flow_utils.py +167 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/generic_train_test.py +76 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/geometry.py +458 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/image_proc.py +183 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/linalg.py +40 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/losses.py +198 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/metrics.py +51 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/model_base.py +53 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/net_basics.py +100 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/resnet.py +333 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/transforms.py +123 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core/utils.py +72 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core.egg-info/PKG-INFO +3 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core.egg-info/SOURCES.txt +20 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core.egg-info/dependency_links.txt +1 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/package_core.egg-info/top_level.txt +1 -0
- spikezoo-0.2/spikezoo/archs/stir/package_core/setup.py +5 -0
- spikezoo-0.2/spikezoo/archs/stir/requirements.txt +12 -0
- spikezoo-0.2/spikezoo/archs/stir/train_STIR.sh +9 -0
- spikezoo-0.2/spikezoo/archs/tfi/__pycache__/nets.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/tfi/nets.py +43 -0
- spikezoo-0.2/spikezoo/archs/tfp/__pycache__/nets.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/tfp/nets.py +13 -0
- spikezoo-0.2/spikezoo/archs/wgse/README.md +64 -0
- spikezoo-0.2/spikezoo/archs/wgse/__pycache__/dwtnets.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/wgse/__pycache__/submodules.cpython-39.pyc +0 -0
- spikezoo-0.2/spikezoo/archs/wgse/dataset.py +59 -0
- spikezoo-0.2/spikezoo/archs/wgse/demo.png +0 -0
- spikezoo-0.2/spikezoo/archs/wgse/demo.py +83 -0
- spikezoo-0.2/spikezoo/archs/wgse/dwtnets.py +145 -0
- spikezoo-0.2/spikezoo/archs/wgse/eval.py +133 -0
- spikezoo-0.2/spikezoo/archs/wgse/logs/WGSE-Dwt1dNet-db8-5-ks3/log.txt +11 -0
- spikezoo-0.2/spikezoo/archs/wgse/submodules.py +68 -0
- spikezoo-0.2/spikezoo/archs/wgse/train.py +261 -0
- spikezoo-0.2/spikezoo/archs/wgse/transform.py +139 -0
- spikezoo-0.2/spikezoo/archs/wgse/utils.py +128 -0
- spikezoo-0.2/spikezoo/archs/wgse/weights/demo.png +0 -0
- spikezoo-0.2/spikezoo/data/base/test/gt/200_part1_key_id151.png +0 -0
- spikezoo-0.2/spikezoo/data/base/test/gt/200_part3_key_id151.png +0 -0
- spikezoo-0.2/spikezoo/data/base/test/gt/203_part1_key_id151.png +0 -0
- spikezoo-0.2/spikezoo/data/base/test/spike/200_part1_key_id151.dat +0 -0
- spikezoo-0.2/spikezoo/data/base/test/spike/200_part3_key_id151.dat +0 -0
- spikezoo-0.2/spikezoo/data/base/test/spike/203_part1_key_id151.dat +0 -0
- spikezoo-0.2/spikezoo/data/base/train/gt/203_part2_key_id151.png +0 -0
- spikezoo-0.2/spikezoo/data/base/train/gt/203_part3_key_id151.png +0 -0
- spikezoo-0.2/spikezoo/data/base/train/gt/203_part4_key_id151.png +0 -0
- spikezoo-0.2/spikezoo/data/base/train/spike/203_part2_key_id151.dat +0 -0
- spikezoo-0.2/spikezoo/data/base/train/spike/203_part3_key_id151.dat +0 -0
- spikezoo-0.2/spikezoo/data/base/train/spike/203_part4_key_id151.dat +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/datasets/base_dataset.py +2 -3
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/metrics/__init__.py +1 -1
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/base_model.py +1 -3
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/pipeline/base_pipeline.py +7 -5
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/pipeline/train_pipeline.py +1 -1
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/utils/other_utils.py +16 -6
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/utils/spike_utils.py +33 -29
- spikezoo-0.2/spikezoo/utils/vidar_loader.cpython-39-x86_64-linux-gnu.so +0 -0
- spikezoo-0.2/spikezoo.egg-info/PKG-INFO +163 -0
- spikezoo-0.2/spikezoo.egg-info/SOURCES.txt +216 -0
- spikezoo-0.1.2/MANIFEST.in +0 -1
- spikezoo-0.1.2/PKG-INFO +0 -39
- spikezoo-0.1.2/README.md +0 -2
- spikezoo-0.1.2/spikezoo/models/spcsnet_model.py +0 -19
- spikezoo-0.1.2/spikezoo.egg-info/PKG-INFO +0 -39
- spikezoo-0.1.2/spikezoo.egg-info/SOURCES.txt +0 -41
- {spikezoo-0.1.2 → spikezoo-0.2}/LICENSE.txt +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/requirements.txt +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/setup.cfg +0 -0
- {spikezoo-0.1.2/spikezoo → spikezoo-0.2/spikezoo/archs}/__init__.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/datasets/__init__.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/datasets/realworld_dataset.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/datasets/reds_small_dataset.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/datasets/szdata_dataset.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/datasets/uhsr_dataset.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/__init__.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/bsf_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/spikeclip_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/spikeformer_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/spk2imgnet_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/ssir_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/ssml_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/stir_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/tfi_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/tfp_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/models/wgse_model.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/pipeline/__init__.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/pipeline/ensemble_pipeline.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/utils/__init__.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/utils/data_utils.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo/utils/img_utils.py +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo.egg-info/dependency_links.txt +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo.egg-info/requires.txt +0 -0
- {spikezoo-0.1.2 → spikezoo-0.2}/spikezoo.egg-info/top_level.txt +0 -0
spikezoo-0.2/MANIFEST.in
ADDED
spikezoo-0.2/PKG-INFO
ADDED
@@ -0,0 +1,163 @@
|
|
1
|
+
Metadata-Version: 2.2
|
2
|
+
Name: spikezoo
|
3
|
+
Version: 0.2
|
4
|
+
Summary: A deep learning toolbox for spike-to-image models.
|
5
|
+
Home-page: https://github.com/chenkang455/Spike-Zoo
|
6
|
+
Author: Kang Chen
|
7
|
+
Author-email: mrchenkang@stu.pku.edu.cn
|
8
|
+
Requires-Python: >=3.7
|
9
|
+
Description-Content-Type: text/markdown
|
10
|
+
License-File: LICENSE.txt
|
11
|
+
Requires-Dist: torch
|
12
|
+
Requires-Dist: requests
|
13
|
+
Requires-Dist: numpy
|
14
|
+
Requires-Dist: tqdm
|
15
|
+
Requires-Dist: scikit-image
|
16
|
+
Requires-Dist: lpips
|
17
|
+
Requires-Dist: pyiqa
|
18
|
+
Requires-Dist: opencv-python
|
19
|
+
Requires-Dist: thop
|
20
|
+
Requires-Dist: pytorch-wavelets
|
21
|
+
Requires-Dist: pytz
|
22
|
+
Requires-Dist: PyWavelets
|
23
|
+
Requires-Dist: pandas
|
24
|
+
Requires-Dist: pillow
|
25
|
+
Requires-Dist: scikit-learn
|
26
|
+
Requires-Dist: scipy
|
27
|
+
Requires-Dist: spikingjelly
|
28
|
+
Requires-Dist: setuptools
|
29
|
+
Dynamic: author
|
30
|
+
Dynamic: author-email
|
31
|
+
Dynamic: description
|
32
|
+
Dynamic: description-content-type
|
33
|
+
Dynamic: home-page
|
34
|
+
Dynamic: requires-dist
|
35
|
+
Dynamic: requires-python
|
36
|
+
Dynamic: summary
|
37
|
+
|
38
|
+
<h2 align="center">
|
39
|
+
<a href="">Spike-Zoo: A Toolbox for Spike-to-Image Reconstruction
|
40
|
+
</a>
|
41
|
+
</h2>
|
42
|
+
|
43
|
+
## 📖 About
|
44
|
+
⚡Spike-Zoo is the go-to library for state-of-the-art pretrained **spike-to-image** models for reconstructing the image from the given spike stream. Whether you're looking for a **simple inference** solution or **training** your own spike-to-image models, ⚡Spike-Zoo is a modular toolbox that supports both.
|
45
|
+
|
46
|
+
If Spike-Zoo helps your research or work, please help to ⭐ this repo or recommend it to your friends. Thanks😊
|
47
|
+
|
48
|
+
## 🚩 Updates/Changelog
|
49
|
+
* **25-02-02:** Release the `Spike-Zoo v0.2` code, which supports more methods and provides more usage options.
|
50
|
+
* **24-08-26:** Update the `SpikeFormer` and `RSIR` methods, the `UHSR` dataset and the `piqe` non-reference metric.
|
51
|
+
|
52
|
+
* **24-07-19:** Release the `Spike-Zoo v0.1` base code.
|
53
|
+
|
54
|
+
## 🍾 Quick Start
|
55
|
+
### 1. Installation
|
56
|
+
For users focused on **utilizing pretrained models for spike-to-image conversion**, we recommend installing SpikeZoo using one of the following methods:
|
57
|
+
|
58
|
+
* Install the last stable version from PyPI:
|
59
|
+
```
|
60
|
+
pip install spikezoo
|
61
|
+
```
|
62
|
+
* Install the latest developing version from the source code:
|
63
|
+
```
|
64
|
+
git clone https://github.com/chenkang455/Spike-Zoo
|
65
|
+
cd Spike-Zoo
|
66
|
+
python setup.py install
|
67
|
+
```
|
68
|
+
|
69
|
+
For users interested in **training their own spike-to-image model based on our framework**, we recommend cloning the repository and modifying the related code directly.
|
70
|
+
|
71
|
+
### 2. Inference
|
72
|
+
Reconstructing images from the spike input is super easy with Spike-Zoo. Try the following code of the single model:
|
73
|
+
``` python
|
74
|
+
from spikezoo.pipeline import Pipeline, PipelineConfig
|
75
|
+
pipeline = Pipeline(
|
76
|
+
cfg = PipelineConfig(save_folder="results"),
|
77
|
+
model_cfg="spk2imgnet",
|
78
|
+
dataset_cfg="base"
|
79
|
+
)
|
80
|
+
```
|
81
|
+
You can also run multiple models at once by changing the pipeline:
|
82
|
+
``` python
|
83
|
+
from spikezoo.pipeline import EnsemblePipeline, EnsemblePipelineConfig
|
84
|
+
pipeline = EnsemblePipeline(
|
85
|
+
cfg = EnsemblePipelineConfig(save_folder="results"),
|
86
|
+
model_cfg_list=['tfp','tfi', 'spk2imgnet', 'wgse', 'ssml', 'bsf', 'stir', 'spikeclip','spikeformer'],
|
87
|
+
dataset_cfg="base"
|
88
|
+
)
|
89
|
+
```
|
90
|
+
* Having established the pipeline, run the following code to obtain the metric and save the reconstructed image from the given spike:
|
91
|
+
``` python
|
92
|
+
# 1. spike-to-image from the given dataset
|
93
|
+
pipeline.spk2img_from_dataset(idx = 0)
|
94
|
+
|
95
|
+
# 2. spike-to-image from the given .dat file
|
96
|
+
pipeline.spk2img_from_file(file_path = 'data/scissor.dat',width = 400,height=250)
|
97
|
+
|
98
|
+
# 3. spike-to-image from the given spike
|
99
|
+
import spikezoo as sz
|
100
|
+
spike = sz.load_vidar_dat("data/scissor.dat",width = 400,height = 250,version='cpp')
|
101
|
+
pipeline.spk2img_from_spk(spike)
|
102
|
+
```
|
103
|
+
For detailed usage, welcome check [test_single.ipynb](examples/test_single.ipynb) and [test_multi.ipynb](examples/test_multi.ipynb) 😊😊😊.
|
104
|
+
|
105
|
+
* Save all images of the given dataset.
|
106
|
+
``` python
|
107
|
+
pipeline.save_imgs_from_dataset()
|
108
|
+
```
|
109
|
+
|
110
|
+
* Calculate the metrics for the specified dataset.
|
111
|
+
``` python
|
112
|
+
pipeline.cal_metrics()
|
113
|
+
```
|
114
|
+
|
115
|
+
* Calculate the parameters (params,flops,latency) based on the established pipeline.
|
116
|
+
``` python
|
117
|
+
pipeline.cal_params()
|
118
|
+
```
|
119
|
+
|
120
|
+
### 3. Training
|
121
|
+
We provide a user-friendly code for training our provided base model (modified from the `SpikeCLIP`) for the classic `REDS` dataset introduced in `Spk2ImgNet`:
|
122
|
+
``` python
|
123
|
+
from spikezoo.pipeline import TrainPipelineConfig, TrainPipeline
|
124
|
+
from spikezoo.datasets.reds_small_dataset import REDS_Small_Config
|
125
|
+
pipeline = TrainPipeline(
|
126
|
+
cfg=TrainPipelineConfig(save_folder="results", epochs = 10),
|
127
|
+
dataset_cfg=REDS_Small_Config(root_dir = "path/REDS_Small"),
|
128
|
+
model_cfg="base",
|
129
|
+
)
|
130
|
+
pipeline.train()
|
131
|
+
```
|
132
|
+
We finish the training with one 4090 GPU in `2 minutes`, achieving `34.7dB` in PSNR and `0.94` in SSIM.
|
133
|
+
|
134
|
+
> 🌟 We encourage users to develop their models using our framework, with the tutorial being released soon.
|
135
|
+
|
136
|
+
### 4. Others
|
137
|
+
We provide a faster `load_vidar_dat` function implemented with `cpp` (by @zeal-ye):
|
138
|
+
``` bash
|
139
|
+
import spikezoo as sz
|
140
|
+
spike = sz.load_vidar_dat("data/scissor.dat",width = 400,height = 250,version='cpp')
|
141
|
+
```
|
142
|
+
🚀 Results on [examples/test_load_dat.py](examples/test_load_dat.py) show that the `cpp` version is more than 10 times faster than the `python` version.
|
143
|
+
|
144
|
+
|
145
|
+
## 📅 TODO
|
146
|
+
- [ ] Provide the tutorials.
|
147
|
+
- [ ] Support more training settings.
|
148
|
+
- [ ] Support more spike-based image reconstruction methods and datasets.
|
149
|
+
- [ ] Support the overall pipeline for spike simulation.
|
150
|
+
|
151
|
+
## ✨ Acknowledgment
|
152
|
+
Our code is built on the open-source projects of [SpikeCV](https://spikecv.github.io/), [IQA-Pytorch](https://github.com/chaofengc/IQA-PyTorch), [BasicSR](https://github.com/XPixelGroup/BasicSR) and [NeRFStudio](https://github.com/nerfstudio-project/nerfstudio). We appreciate the effort of the contributors to these repositories. Thanks to @ruizhao26 and @Leozhangjiyuan for their help in building this project.
|
153
|
+
|
154
|
+
## 📑 Citation
|
155
|
+
If you find our codes helpful to your research, please consider using the following citation:
|
156
|
+
```
|
157
|
+
@misc{spikezoo,
|
158
|
+
title={{Spike-Zoo}: A Toolbox for Spike-to-Image Reconstruction},
|
159
|
+
author={Kang Chen and Zhiyuan Ye},
|
160
|
+
year={2025},
|
161
|
+
howpublished = "[Online]. Available: \url{https://github.com/chenkang455/Spike-Zoo}"
|
162
|
+
}
|
163
|
+
```
|
spikezoo-0.2/README.md
ADDED
@@ -0,0 +1,126 @@
|
|
1
|
+
<h2 align="center">
|
2
|
+
<a href="">Spike-Zoo: A Toolbox for Spike-to-Image Reconstruction
|
3
|
+
</a>
|
4
|
+
</h2>
|
5
|
+
|
6
|
+
## 📖 About
|
7
|
+
⚡Spike-Zoo is the go-to library for state-of-the-art pretrained **spike-to-image** models for reconstructing the image from the given spike stream. Whether you're looking for a **simple inference** solution or **training** your own spike-to-image models, ⚡Spike-Zoo is a modular toolbox that supports both.
|
8
|
+
|
9
|
+
If Spike-Zoo helps your research or work, please help to ⭐ this repo or recommend it to your friends. Thanks😊
|
10
|
+
|
11
|
+
## 🚩 Updates/Changelog
|
12
|
+
* **25-02-02:** Release the `Spike-Zoo v0.2` code, which supports more methods and provides more usage options.
|
13
|
+
* **24-08-26:** Update the `SpikeFormer` and `RSIR` methods, the `UHSR` dataset and the `piqe` non-reference metric.
|
14
|
+
|
15
|
+
* **24-07-19:** Release the `Spike-Zoo v0.1` base code.
|
16
|
+
|
17
|
+
## 🍾 Quick Start
|
18
|
+
### 1. Installation
|
19
|
+
For users focused on **utilizing pretrained models for spike-to-image conversion**, we recommend installing SpikeZoo using one of the following methods:
|
20
|
+
|
21
|
+
* Install the last stable version from PyPI:
|
22
|
+
```
|
23
|
+
pip install spikezoo
|
24
|
+
```
|
25
|
+
* Install the latest developing version from the source code:
|
26
|
+
```
|
27
|
+
git clone https://github.com/chenkang455/Spike-Zoo
|
28
|
+
cd Spike-Zoo
|
29
|
+
python setup.py install
|
30
|
+
```
|
31
|
+
|
32
|
+
For users interested in **training their own spike-to-image model based on our framework**, we recommend cloning the repository and modifying the related code directly.
|
33
|
+
|
34
|
+
### 2. Inference
|
35
|
+
Reconstructing images from the spike input is super easy with Spike-Zoo. Try the following code of the single model:
|
36
|
+
``` python
|
37
|
+
from spikezoo.pipeline import Pipeline, PipelineConfig
|
38
|
+
pipeline = Pipeline(
|
39
|
+
cfg = PipelineConfig(save_folder="results"),
|
40
|
+
model_cfg="spk2imgnet",
|
41
|
+
dataset_cfg="base"
|
42
|
+
)
|
43
|
+
```
|
44
|
+
You can also run multiple models at once by changing the pipeline:
|
45
|
+
``` python
|
46
|
+
from spikezoo.pipeline import EnsemblePipeline, EnsemblePipelineConfig
|
47
|
+
pipeline = EnsemblePipeline(
|
48
|
+
cfg = EnsemblePipelineConfig(save_folder="results"),
|
49
|
+
model_cfg_list=['tfp','tfi', 'spk2imgnet', 'wgse', 'ssml', 'bsf', 'stir', 'spikeclip','spikeformer'],
|
50
|
+
dataset_cfg="base"
|
51
|
+
)
|
52
|
+
```
|
53
|
+
* Having established the pipeline, run the following code to obtain the metric and save the reconstructed image from the given spike:
|
54
|
+
``` python
|
55
|
+
# 1. spike-to-image from the given dataset
|
56
|
+
pipeline.spk2img_from_dataset(idx = 0)
|
57
|
+
|
58
|
+
# 2. spike-to-image from the given .dat file
|
59
|
+
pipeline.spk2img_from_file(file_path = 'data/scissor.dat',width = 400,height=250)
|
60
|
+
|
61
|
+
# 3. spike-to-image from the given spike
|
62
|
+
import spikezoo as sz
|
63
|
+
spike = sz.load_vidar_dat("data/scissor.dat",width = 400,height = 250,version='cpp')
|
64
|
+
pipeline.spk2img_from_spk(spike)
|
65
|
+
```
|
66
|
+
For detailed usage, welcome check [test_single.ipynb](examples/test_single.ipynb) and [test_multi.ipynb](examples/test_multi.ipynb) 😊😊😊.
|
67
|
+
|
68
|
+
* Save all images of the given dataset.
|
69
|
+
``` python
|
70
|
+
pipeline.save_imgs_from_dataset()
|
71
|
+
```
|
72
|
+
|
73
|
+
* Calculate the metrics for the specified dataset.
|
74
|
+
``` python
|
75
|
+
pipeline.cal_metrics()
|
76
|
+
```
|
77
|
+
|
78
|
+
* Calculate the parameters (params,flops,latency) based on the established pipeline.
|
79
|
+
``` python
|
80
|
+
pipeline.cal_params()
|
81
|
+
```
|
82
|
+
|
83
|
+
### 3. Training
|
84
|
+
We provide a user-friendly code for training our provided base model (modified from the `SpikeCLIP`) for the classic `REDS` dataset introduced in `Spk2ImgNet`:
|
85
|
+
``` python
|
86
|
+
from spikezoo.pipeline import TrainPipelineConfig, TrainPipeline
|
87
|
+
from spikezoo.datasets.reds_small_dataset import REDS_Small_Config
|
88
|
+
pipeline = TrainPipeline(
|
89
|
+
cfg=TrainPipelineConfig(save_folder="results", epochs = 10),
|
90
|
+
dataset_cfg=REDS_Small_Config(root_dir = "path/REDS_Small"),
|
91
|
+
model_cfg="base",
|
92
|
+
)
|
93
|
+
pipeline.train()
|
94
|
+
```
|
95
|
+
We finish the training with one 4090 GPU in `2 minutes`, achieving `34.7dB` in PSNR and `0.94` in SSIM.
|
96
|
+
|
97
|
+
> 🌟 We encourage users to develop their models using our framework, with the tutorial being released soon.
|
98
|
+
|
99
|
+
### 4. Others
|
100
|
+
We provide a faster `load_vidar_dat` function implemented with `cpp` (by @zeal-ye):
|
101
|
+
``` bash
|
102
|
+
import spikezoo as sz
|
103
|
+
spike = sz.load_vidar_dat("data/scissor.dat",width = 400,height = 250,version='cpp')
|
104
|
+
```
|
105
|
+
🚀 Results on [examples/test_load_dat.py](examples/test_load_dat.py) show that the `cpp` version is more than 10 times faster than the `python` version.
|
106
|
+
|
107
|
+
|
108
|
+
## 📅 TODO
|
109
|
+
- [ ] Provide the tutorials.
|
110
|
+
- [ ] Support more training settings.
|
111
|
+
- [ ] Support more spike-based image reconstruction methods and datasets.
|
112
|
+
- [ ] Support the overall pipeline for spike simulation.
|
113
|
+
|
114
|
+
## ✨ Acknowledgment
|
115
|
+
Our code is built on the open-source projects of [SpikeCV](https://spikecv.github.io/), [IQA-Pytorch](https://github.com/chaofengc/IQA-PyTorch), [BasicSR](https://github.com/XPixelGroup/BasicSR) and [NeRFStudio](https://github.com/nerfstudio-project/nerfstudio). We appreciate the effort of the contributors to these repositories. Thanks to @ruizhao26 and @Leozhangjiyuan for their help in building this project.
|
116
|
+
|
117
|
+
## 📑 Citation
|
118
|
+
If you find our codes helpful to your research, please consider using the following citation:
|
119
|
+
```
|
120
|
+
@misc{spikezoo,
|
121
|
+
title={{Spike-Zoo}: A Toolbox for Spike-to-Image Reconstruction},
|
122
|
+
author={Kang Chen and Zhiyuan Ye},
|
123
|
+
year={2025},
|
124
|
+
howpublished = "[Online]. Available: \url{https://github.com/chenkang455/Spike-Zoo}"
|
125
|
+
}
|
126
|
+
```
|
@@ -1,16 +1,16 @@
|
|
1
1
|
from setuptools import find_packages
|
2
2
|
from setuptools import setup
|
3
3
|
|
4
|
-
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
4
|
+
with open("./requirements.txt", "r", encoding="utf-8") as fh:
|
5
5
|
install_requires = fh.read()
|
6
6
|
|
7
|
-
with open("README.md", "r", encoding="utf-8") as fh:
|
7
|
+
with open("./README.md", "r", encoding="utf-8") as fh:
|
8
8
|
long_description = fh.read()
|
9
9
|
|
10
10
|
setup(
|
11
11
|
install_requires=install_requires,
|
12
12
|
name="spikezoo",
|
13
|
-
version="0.
|
13
|
+
version="0.2",
|
14
14
|
author="Kang Chen",
|
15
15
|
author_email="mrchenkang@stu.pku.edu.cn",
|
16
16
|
description="A deep learning toolbox for spike-to-image models.",
|
@@ -19,5 +19,5 @@ setup(
|
|
19
19
|
url="https://github.com/chenkang455/Spike-Zoo",
|
20
20
|
packages=find_packages(),
|
21
21
|
python_requires='>=3.7',
|
22
|
-
include_package_data=
|
22
|
+
include_package_data=True
|
23
23
|
)
|
@@ -0,0 +1,13 @@
|
|
1
|
+
from .utils.spike_utils import load_vidar_dat
|
2
|
+
from .models import model_list
|
3
|
+
from .datasets import dataset_list
|
4
|
+
from .metrics import metric_all_names
|
5
|
+
|
6
|
+
def get_datasets():
|
7
|
+
return dataset_list
|
8
|
+
|
9
|
+
def get_models():
|
10
|
+
return model_list
|
11
|
+
|
12
|
+
def get_metrics():
|
13
|
+
return metric_all_names
|
Binary file
|
Binary file
|
@@ -0,0 +1,34 @@
|
|
1
|
+
import torch.nn as nn
|
2
|
+
|
3
|
+
def conv_layer(inDim, outDim, ks, s, p, norm_layer='none'):
|
4
|
+
## convolutional layer
|
5
|
+
conv = nn.Conv2d(inDim, outDim, kernel_size=ks, stride=s, padding=p)
|
6
|
+
relu = nn.ReLU(True)
|
7
|
+
assert norm_layer in ('batch', 'instance', 'none')
|
8
|
+
if norm_layer == 'none':
|
9
|
+
seq = nn.Sequential(*[conv, relu])
|
10
|
+
else:
|
11
|
+
if (norm_layer == 'instance'):
|
12
|
+
norm = nn.InstanceNorm2d(outDim, affine=False, track_running_stats=False) # instance norm
|
13
|
+
else:
|
14
|
+
momentum = 0.1
|
15
|
+
norm = nn.BatchNorm2d(outDim, momentum = momentum, affine=True, track_running_stats=True)
|
16
|
+
seq = nn.Sequential(*[conv, norm, relu])
|
17
|
+
return seq
|
18
|
+
|
19
|
+
class BaseNet(nn.Module):
|
20
|
+
"""Borrow the structure from the SpikeCLIP. (https://arxiv.org/abs/2501.04477)"""
|
21
|
+
def __init__(self, inDim=41):
|
22
|
+
super(BaseNet, self).__init__()
|
23
|
+
norm='none'
|
24
|
+
outDim=1
|
25
|
+
convBlock1 = conv_layer(inDim,64,3,1,1)
|
26
|
+
convBlock2 = conv_layer(64,128,3,1,1,norm)
|
27
|
+
convBlock3 = conv_layer(128,64,3,1,1,norm)
|
28
|
+
convBlock4 = conv_layer(64,16,3,1,1,norm)
|
29
|
+
conv = nn.Conv2d(16, outDim, 3, 1, 1)
|
30
|
+
self.seq = nn.Sequential(*[convBlock1, convBlock2, convBlock3, convBlock4, conv])
|
31
|
+
|
32
|
+
def forward(self,x):
|
33
|
+
return self.seq(x)
|
34
|
+
|
@@ -0,0 +1,92 @@
|
|
1
|
+
## [CVPR 2024] Boosting Spike Camera Image Reconstruction from a Perspective of Dealing with Spike Fluctuations
|
2
|
+
|
3
|
+
<h4 align="center"> Rui Zhao<sup>1,2</sup>, Ruiqin Xiong<sup>1,2</sup>, Jing Zhao<sup>1,2</sup>, Jian Zhang<sup>3</sup>, Xiaopeng Fan<sup>4</sup>, Zhaofei Yu<sup>1,2</sup>, Tiejun Huang<sup>1,2</sup> </h4>
|
4
|
+
<h4 align="center">1. School of Computer Science, Peking University<br>
|
5
|
+
2. National Key Laboratory for Multimedia Information Processing, Peking University<br>
|
6
|
+
3. School of Electronic and Computer Engineering, Peking University<br>
|
7
|
+
4. School of Computer Science and Technology, Harbin Institute of Technology
|
8
|
+
</h4><br>
|
9
|
+
|
10
|
+
This repository contains the official source code for our paper:
|
11
|
+
|
12
|
+
Boosting Spike Camera Image Reconstruction from a Perspective of Dealing with Spike Fluctuations
|
13
|
+
|
14
|
+
CVPR 2024
|
15
|
+
|
16
|
+
## Environment
|
17
|
+
|
18
|
+
You can choose cudatoolkit version to match your server. The code is tested on PyTorch 2.0.1+cu120.
|
19
|
+
|
20
|
+
```bash
|
21
|
+
conda create -n bsf python==3.10.9
|
22
|
+
conda activate bsf
|
23
|
+
# You can choose the PyTorch version you like, we recommend version >= 1.10.1
|
24
|
+
# For example
|
25
|
+
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
|
26
|
+
pip3 install -r requirements.txt
|
27
|
+
```
|
28
|
+
|
29
|
+
## Prepare the Data
|
30
|
+
|
31
|
+
##### 1. Download the dataset (Approximate 50GB)
|
32
|
+
|
33
|
+
[Link of the dataset (BaiduNetDisk)](https://pan.baidu.com/s/1zBp-ed1KtmhAab5Z_62ttw) (Password: 2728)
|
34
|
+
|
35
|
+
##### 2. Deploy the dataset for training faster (Approximate <u>another</u> 125GB)
|
36
|
+
|
37
|
+
firstly modify the data root and output root in `./prepare_data/crop_dataset_train.py` and `./prepare_data/crop_dataset_val.py`
|
38
|
+
|
39
|
+
```shell
|
40
|
+
cd prepare_data &&
|
41
|
+
bash crop_train.sh $your_gpu_id &&
|
42
|
+
bash crop_val.sh $your_gpu_id
|
43
|
+
```
|
44
|
+
|
45
|
+
## Evaluate
|
46
|
+
|
47
|
+
```shell
|
48
|
+
CUDA_VISIBLE_DEVICES=$1 python3 -W ignore main.py \
|
49
|
+
--alpha 0.7 \
|
50
|
+
--vis-path vis/bsf \
|
51
|
+
-evp eval_vis/bsf \
|
52
|
+
--logs_file_name bsf \
|
53
|
+
--compile_model \
|
54
|
+
--test_eval \
|
55
|
+
--arch bsf \
|
56
|
+
--pretrained ckpt/bsf.pth
|
57
|
+
```
|
58
|
+
|
59
|
+
## Train
|
60
|
+
|
61
|
+
```shell
|
62
|
+
CUDA_VISIBLE_DEVICES=$1 python3 -W ignore main.py \
|
63
|
+
-bs 8 \
|
64
|
+
-j 8 \
|
65
|
+
-lr 1e-4 \
|
66
|
+
--epochs 61 \
|
67
|
+
--train-res 96 96 \
|
68
|
+
--lr-scale-factor 0.5 \
|
69
|
+
--milestones 10 20 30 40 50 60 70 80 90 100 \
|
70
|
+
--alpha 0.7 \
|
71
|
+
--vis-path vis/bsf \
|
72
|
+
-evp eval_vis/bsf \
|
73
|
+
--logs_file_name bsf \
|
74
|
+
--compile_model \
|
75
|
+
--weight_decay 0.0 \
|
76
|
+
--eval-interval 10 \
|
77
|
+
--half_reserve 0 \
|
78
|
+
--arch bsf
|
79
|
+
```
|
80
|
+
|
81
|
+
## Citations
|
82
|
+
|
83
|
+
If you find this code useful in your research, please consider citing our paper:
|
84
|
+
|
85
|
+
```
|
86
|
+
@inproceedings{zhao2024boosting,
|
87
|
+
title={Boosting Spike Camera Image Reconstruction from a Perspective of Dealing with Spike Fluctuations},
|
88
|
+
author={Zhao, Rui and Xiong, Ruiqin and Zhao, Jing and Zhang, Jian and Fan, Xiaopeng and Yu, Zhaofei and Huang, Tiejun},
|
89
|
+
booktitle={IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
|
90
|
+
year={2024}
|
91
|
+
}
|
92
|
+
```
|