dnt 0.2.1__py3-none-any.whl → 0.3.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dnt/__init__.py +4 -1
- dnt/analysis/__init__.py +3 -1
- dnt/analysis/count.py +107 -0
- dnt/analysis/interaction2.py +518 -0
- dnt/analysis/position.py +12 -0
- dnt/analysis/stop.py +92 -33
- dnt/analysis/stop2.py +289 -0
- dnt/analysis/stop3.py +758 -0
- dnt/detect/__init__.py +1 -1
- dnt/detect/signal/detector.py +326 -0
- dnt/detect/timestamp.py +105 -0
- dnt/detect/yolov8/detector.py +182 -35
- dnt/detect/yolov8/segmentor.py +171 -0
- dnt/engine/__init__.py +8 -0
- dnt/engine/bbox_interp.py +83 -0
- dnt/engine/bbox_iou.py +20 -0
- dnt/engine/cluster.py +31 -0
- dnt/engine/iob.py +66 -0
- dnt/filter/__init__.py +4 -0
- dnt/filter/filter.py +450 -21
- dnt/label/__init__.py +1 -1
- dnt/label/labeler.py +215 -14
- dnt/label/labeler2.py +631 -0
- dnt/shared/__init__.py +2 -1
- dnt/shared/data/coco.names +0 -0
- dnt/shared/data/openimages.names +0 -0
- dnt/shared/data/voc.names +0 -0
- dnt/shared/download.py +12 -0
- dnt/shared/synhcro.py +150 -0
- dnt/shared/util.py +17 -4
- dnt/third_party/fast-reid/__init__.py +1 -0
- dnt/third_party/fast-reid/configs/Base-AGW.yml +19 -0
- dnt/third_party/fast-reid/configs/Base-MGN.yml +12 -0
- dnt/third_party/fast-reid/configs/Base-SBS.yml +63 -0
- dnt/third_party/fast-reid/configs/Base-bagtricks.yml +76 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R101-ibn.yml +13 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R50-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_S50.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_vit.yml +88 -0
- dnt/third_party/fast-reid/configs/Market1501/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/VERIWild/bagtricks_R50-ibn.yml +35 -0
- dnt/third_party/fast-reid/configs/VeRi/sbs_R50-ibn.yml +35 -0
- dnt/third_party/fast-reid/configs/VehicleID/bagtricks_R50-ibn.yml +36 -0
- dnt/third_party/fast-reid/configs/__init__.py +0 -0
- dnt/third_party/fast-reid/fast_reid_interfece.py +175 -0
- dnt/third_party/fast-reid/fastreid/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/config/__init__.py +15 -0
- dnt/third_party/fast-reid/fastreid/config/config.py +319 -0
- dnt/third_party/fast-reid/fastreid/config/defaults.py +329 -0
- dnt/third_party/fast-reid/fastreid/data/__init__.py +17 -0
- dnt/third_party/fast-reid/fastreid/data/build.py +194 -0
- dnt/third_party/fast-reid/fastreid/data/common.py +58 -0
- dnt/third_party/fast-reid/fastreid/data/data_utils.py +202 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/AirportALERT.py +50 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/__init__.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/bases.py +183 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/caviara.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/cuhk03.py +274 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/cuhk_sysu.py +58 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/dukemtmcreid.py +70 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/grid.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/iLIDS.py +45 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/lpw.py +49 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/market1501.py +89 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/msmt17.py +114 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/pes3d.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/pku.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/prai.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/prid.py +41 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/saivt.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/sensereid.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/shinpuhkan.py +48 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/sysu_mm.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/thermalworld.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/vehicleid.py +126 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/veri.py +69 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/veriwild.py +140 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/viper.py +45 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/wildtracker.py +59 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/__init__.py +18 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/data_sampler.py +85 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/imbalance_sampler.py +67 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/triplet_sampler.py +260 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/__init__.py +11 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/autoaugment.py +806 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/build.py +100 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/functional.py +180 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/transforms.py +161 -0
- dnt/third_party/fast-reid/fastreid/engine/__init__.py +15 -0
- dnt/third_party/fast-reid/fastreid/engine/defaults.py +490 -0
- dnt/third_party/fast-reid/fastreid/engine/hooks.py +534 -0
- dnt/third_party/fast-reid/fastreid/engine/launch.py +103 -0
- dnt/third_party/fast-reid/fastreid/engine/train_loop.py +357 -0
- dnt/third_party/fast-reid/fastreid/evaluation/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/evaluation/clas_evaluator.py +81 -0
- dnt/third_party/fast-reid/fastreid/evaluation/evaluator.py +176 -0
- dnt/third_party/fast-reid/fastreid/evaluation/query_expansion.py +46 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank.py +200 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/__init__.py +20 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/setup.py +32 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/test_cython.py +106 -0
- dnt/third_party/fast-reid/fastreid/evaluation/reid_evaluation.py +143 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rerank.py +73 -0
- dnt/third_party/fast-reid/fastreid/evaluation/roc.py +90 -0
- dnt/third_party/fast-reid/fastreid/evaluation/testing.py +88 -0
- dnt/third_party/fast-reid/fastreid/layers/__init__.py +19 -0
- dnt/third_party/fast-reid/fastreid/layers/activation.py +59 -0
- dnt/third_party/fast-reid/fastreid/layers/any_softmax.py +80 -0
- dnt/third_party/fast-reid/fastreid/layers/batch_norm.py +205 -0
- dnt/third_party/fast-reid/fastreid/layers/context_block.py +113 -0
- dnt/third_party/fast-reid/fastreid/layers/drop.py +161 -0
- dnt/third_party/fast-reid/fastreid/layers/frn.py +199 -0
- dnt/third_party/fast-reid/fastreid/layers/gather_layer.py +30 -0
- dnt/third_party/fast-reid/fastreid/layers/helpers.py +31 -0
- dnt/third_party/fast-reid/fastreid/layers/non_local.py +54 -0
- dnt/third_party/fast-reid/fastreid/layers/pooling.py +124 -0
- dnt/third_party/fast-reid/fastreid/layers/se_layer.py +25 -0
- dnt/third_party/fast-reid/fastreid/layers/splat.py +109 -0
- dnt/third_party/fast-reid/fastreid/layers/weight_init.py +122 -0
- dnt/third_party/fast-reid/fastreid/modeling/__init__.py +23 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/__init__.py +18 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/build.py +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/mobilenet.py +195 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/mobilenetv3.py +283 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/osnet.py +525 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/__init__.py +4 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/config.py +396 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B0_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B1_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B2_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B3_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B4_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B5_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet.py +281 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnet.py +596 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-1.6GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-12GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-16GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-200MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-3.2GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-32GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-4.0GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-400MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-6.4GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-600MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-8.0GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-800MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-1.6GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-12GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-16GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-200MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-3.2GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-32GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-4.0GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-400MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-6.4GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-600MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-8.0GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-800MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/repvgg.py +309 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnest.py +365 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnet.py +364 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnext.py +335 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/shufflenet.py +203 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/vision_transformer.py +399 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/__init__.py +11 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/build.py +25 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/clas_head.py +36 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/embedding_head.py +151 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/__init__.py +12 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/circle_loss.py +71 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/cross_entroy_loss.py +54 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/focal_loss.py +92 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/triplet_loss.py +113 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/utils.py +48 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/__init__.py +14 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/baseline.py +188 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/build.py +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/distiller.py +140 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/mgn.py +394 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/moco.py +126 -0
- dnt/third_party/fast-reid/fastreid/solver/__init__.py +8 -0
- dnt/third_party/fast-reid/fastreid/solver/build.py +348 -0
- dnt/third_party/fast-reid/fastreid/solver/lr_scheduler.py +66 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/__init__.py +10 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/lamb.py +123 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/radam.py +149 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/swa.py +246 -0
- dnt/third_party/fast-reid/fastreid/utils/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/utils/checkpoint.py +503 -0
- dnt/third_party/fast-reid/fastreid/utils/collect_env.py +158 -0
- dnt/third_party/fast-reid/fastreid/utils/comm.py +255 -0
- dnt/third_party/fast-reid/fastreid/utils/compute_dist.py +200 -0
- dnt/third_party/fast-reid/fastreid/utils/env.py +119 -0
- dnt/third_party/fast-reid/fastreid/utils/events.py +461 -0
- dnt/third_party/fast-reid/fastreid/utils/faiss_utils.py +127 -0
- dnt/third_party/fast-reid/fastreid/utils/file_io.py +520 -0
- dnt/third_party/fast-reid/fastreid/utils/history_buffer.py +71 -0
- dnt/third_party/fast-reid/fastreid/utils/logger.py +211 -0
- dnt/third_party/fast-reid/fastreid/utils/params.py +103 -0
- dnt/third_party/fast-reid/fastreid/utils/precision_bn.py +94 -0
- dnt/third_party/fast-reid/fastreid/utils/registry.py +66 -0
- dnt/third_party/fast-reid/fastreid/utils/summary.py +120 -0
- dnt/third_party/fast-reid/fastreid/utils/timer.py +68 -0
- dnt/third_party/fast-reid/fastreid/utils/visualizer.py +278 -0
- dnt/track/__init__.py +3 -1
- dnt/track/botsort/__init__.py +4 -0
- dnt/track/botsort/bot_tracker/__init__.py +3 -0
- dnt/track/botsort/bot_tracker/basetrack.py +60 -0
- dnt/track/botsort/bot_tracker/bot_sort.py +473 -0
- dnt/track/botsort/bot_tracker/gmc.py +316 -0
- dnt/track/botsort/bot_tracker/kalman_filter.py +269 -0
- dnt/track/botsort/bot_tracker/matching.py +194 -0
- dnt/track/botsort/bot_tracker/mc_bot_sort.py +505 -0
- dnt/track/{dsort/utils → botsort/bot_tracker/tracking_utils}/evaluation.py +14 -4
- dnt/track/{dsort/utils → botsort/bot_tracker/tracking_utils}/io.py +19 -36
- dnt/track/botsort/bot_tracker/tracking_utils/timer.py +37 -0
- dnt/track/botsort/inference.py +96 -0
- dnt/track/config.py +120 -0
- dnt/track/dsort/configs/bagtricks_R50.yml +7 -0
- dnt/track/dsort/configs/deep_sort.yaml +0 -0
- dnt/track/dsort/configs/fastreid.yaml +1 -1
- dnt/track/dsort/deep_sort/deep/checkpoint/ckpt.t7 +0 -0
- dnt/track/dsort/deep_sort/deep/feature_extractor.py +87 -8
- dnt/track/dsort/deep_sort/deep_sort.py +31 -21
- dnt/track/dsort/deep_sort/sort/detection.py +2 -1
- dnt/track/dsort/deep_sort/sort/iou_matching.py +0 -2
- dnt/track/dsort/deep_sort/sort/linear_assignment.py +0 -3
- dnt/track/dsort/deep_sort/sort/nn_matching.py +5 -5
- dnt/track/dsort/deep_sort/sort/preprocessing.py +1 -2
- dnt/track/dsort/deep_sort/sort/track.py +2 -1
- dnt/track/dsort/deep_sort/sort/tracker.py +1 -1
- dnt/track/dsort/dsort.py +44 -27
- dnt/track/re_class.py +117 -0
- dnt/track/sort/sort.py +9 -7
- dnt/track/tracker.py +225 -20
- dnt-0.3.1.8.dist-info/METADATA +117 -0
- dnt-0.3.1.8.dist-info/RECORD +315 -0
- {dnt-0.2.1.dist-info → dnt-0.3.1.8.dist-info}/WHEEL +1 -1
- dnt/track/dsort/deep_sort/deep/evaluate.py +0 -15
- dnt/track/dsort/deep_sort/deep/original_model.py +0 -106
- dnt/track/dsort/deep_sort/deep/test.py +0 -77
- dnt/track/dsort/deep_sort/deep/train.py +0 -189
- dnt/track/dsort/utils/asserts.py +0 -13
- dnt/track/dsort/utils/draw.py +0 -36
- dnt/track/dsort/utils/json_logger.py +0 -383
- dnt/track/dsort/utils/log.py +0 -17
- dnt/track/dsort/utils/parser.py +0 -35
- dnt/track/dsort/utils/tools.py +0 -39
- dnt-0.2.1.dist-info/METADATA +0 -35
- dnt-0.2.1.dist-info/RECORD +0 -60
- /dnt/{track/dsort/utils → third_party/fast-reid/checkpoint}/__init__.py +0 -0
- {dnt-0.2.1.dist-info → dnt-0.3.1.8.dist-info/licenses}/LICENSE +0 -0
- {dnt-0.2.1.dist-info → dnt-0.3.1.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
""" DropBlock, DropPath
|
|
2
|
+
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
|
|
3
|
+
Papers:
|
|
4
|
+
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
|
|
5
|
+
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
|
|
6
|
+
Code:
|
|
7
|
+
DropBlock impl inspired by two Tensorflow impl that I liked:
|
|
8
|
+
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
|
|
9
|
+
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
|
|
10
|
+
Hacked together by / Copyright 2020 Ross Wightman
|
|
11
|
+
"""
|
|
12
|
+
import torch
|
|
13
|
+
import torch.nn as nn
|
|
14
|
+
import torch.nn.functional as F
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def drop_block_2d(
        x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
    runs with success, but needs further validation and possibly optimization for lower runtime impact.

    Args:
        x: input feature map of shape (B, C, H, W).
        drop_prob: target fraction of units to drop.
        block_size: side length of each square dropped block (clipped to min(W, H)).
        gamma_scale: multiplier on the computed seed rate gamma.
        with_noise: if True, fill dropped blocks with gaussian noise instead of zeros.
        inplace: if True, mutate ``x`` in place instead of allocating a new tensor.
        batchwise: if True, draw one shared mask for the whole batch (faster).

    Returns:
        Tensor of the same shape as ``x`` (the same object when ``inplace``).
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed_drop_rate, the gamma parameter: chosen so that dropping a
    # block_size**2 neighborhood around each seed drops ~drop_prob of all units.
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    # Forces the block to be inside the feature map: valid_block is 1 only where
    # a full block centered at that position fits without crossing the border.
    # NOTE(review): meshgrid (no indexing arg) yields (W, H)-shaped grids, but the
    # result is reshaped to (1, 1, H, W) below — consistent only when H == W;
    # confirm behavior for non-square feature maps.
    w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
        ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)

    if batchwise:
        # one mask for whole batch, quite a bit faster
        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        uniform_noise = torch.rand_like(x)
    # block_mask == 1 means "keep"; a unit becomes a drop seed (0) only where it
    # is a valid block center AND its uniform draw falls below gamma.
    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
    # -maxpool(-m) is a min-pool: expand each 0 seed into a block of zeros.
    block_mask = -F.max_pool2d(
        -block_mask,
        kernel_size=clipped_block_size,  # block_size,
        stride=1,
        padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
        if inplace:
            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
        else:
            x = x * block_mask + normal_noise * (1 - block_mask)
    else:
        # Rescale surviving activations so the expected magnitude is preserved;
        # 1e-7 guards against division by zero when everything is dropped.
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def drop_block_fast_2d(
        x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """DropBlock regularization, fast variant. See https://arxiv.org/pdf/1810.12890.pdf

    Simplified from ``drop_block_2d``: no valid-block masking at the edges, so
    seed points may produce blocks partially outside the feature map. Optionally
    fills dropped blocks with gaussian noise instead of zeros.

    Args:
        x: input feature map of shape (B, C, H, W).
        drop_prob: target fraction of units to drop.
        block_size: side length of each dropped block (clipped to min(W, H)).
        gamma_scale: multiplier on the computed seed rate gamma.
        with_noise: fill dropped blocks with gaussian noise instead of zeros.
        inplace: mutate ``x`` in place instead of allocating a new tensor.
        batchwise: draw one shared mask for the whole batch (faster).

    Returns:
        Tensor of the same shape as ``x`` (the same object when ``inplace``).
    """
    _, channels, height, width = x.shape
    area = width * height
    kernel = min(block_size, min(width, height))
    # Seed rate gamma, scaled so the expected dropped fraction ~= drop_prob.
    gamma = gamma_scale * drop_prob * area / kernel ** 2 / (
        (width - block_size + 1) * (height - block_size + 1))

    # Draw Bernoulli(gamma) block seeds.
    if batchwise:
        # one mask for whole batch, quite a bit faster
        seeds = torch.rand((1, channels, height, width), dtype=x.dtype, device=x.device) < gamma
    else:
        # mask per batch element
        seeds = torch.rand_like(x) < gamma
    # Dilate each seed into a kernel x kernel block of ones (1 = drop here).
    block_mask = F.max_pool2d(
        seeds.to(x.dtype), kernel_size=kernel, stride=1, padding=kernel // 2)

    if with_noise:
        if batchwise:
            noise = torch.randn((1, channels, height, width), dtype=x.dtype, device=x.device)
        else:
            noise = torch.randn_like(x)
        keep = 1. - block_mask
        if inplace:
            x.mul_(keep).add_(noise * block_mask)
        else:
            x = x * keep + noise * block_mask
    else:
        keep_mask = 1 - block_mask
        # Rescale surviving activations so expected magnitude is preserved;
        # 1e-7 guards against an all-dropped mask.
        scale = (keep_mask.numel() / keep_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
        if inplace:
            x.mul_(keep_mask * scale)
        else:
            x = x * keep_mask * scale
    return x
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class DropBlock2d(nn.Module):
    """DropBlock regularization module. See https://arxiv.org/pdf/1810.12890.pdf

    Dispatches to ``drop_block_fast_2d`` (default) or ``drop_block_2d``.
    Acts as the identity outside training mode or when ``drop_prob`` is falsy.
    """

    def __init__(self,
                 drop_prob=0.1,
                 block_size=7,
                 gamma_scale=1.0,
                 with_noise=False,
                 inplace=False,
                 batchwise=False,
                 fast=True):
        super().__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast  # FIXME finish comparisons of fast vs not

    def forward(self, x):
        # Identity when not training or dropping is disabled.
        if not self.training or not self.drop_prob:
            return x
        impl = drop_block_fast_2d if self.fast else drop_block_2d
        return impl(
            x, self.drop_prob, self.block_size, self.gamma_scale,
            self.with_noise, self.inplace, self.batchwise)
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    # Identity when disabled or at inference time.
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over every remaining dim
    # (works with diff dim tensors, not just 2D ConvNets).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    # floor(keep_prob + U[0,1)) binarizes: 1 with prob keep_prob, else 0.
    mask = torch.floor(keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device))
    # Scale kept samples by 1/keep_prob so the expectation is unchanged.
    return x.div(keep_prob) * mask
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class DropPath(nn.Module):
    """Per-sample stochastic depth for residual main paths (see ``drop_path``)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        # Probability of zeroing a sample's path; None behaves like 0 at call time.
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
# encoding: utf-8
|
|
2
|
+
"""
|
|
3
|
+
@author: liaoxingyu
|
|
4
|
+
@contact: sherlockliao01@gmail.com
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import torch
|
|
8
|
+
from torch import nn
|
|
9
|
+
from torch.nn.modules.batchnorm import BatchNorm2d
|
|
10
|
+
from torch.nn import ReLU, LeakyReLU
|
|
11
|
+
from torch.nn.parameter import Parameter
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class TLU(nn.Module):
    """Thresholded Linear Unit: y -> max(y, tau) with a learnable per-channel tau.

    max(y, tau) = max(y - tau, 0) + tau = ReLU(y - tau) + tau. Since tau is
    initialized to zero, the layer starts out equivalent to ReLU.
    """

    def __init__(self, num_features):
        super().__init__()
        self.num_features = num_features
        self.tau = Parameter(torch.Tensor(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # tau = 0 everywhere -> identical to ReLU at init.
        nn.init.zeros_(self.tau)

    def extra_repr(self):
        return 'num_features={num_features}'.format(**self.__dict__)

    def forward(self, x):
        # Broadcast the per-channel threshold over (B, C, H, W).
        threshold = self.tau.view(1, self.num_features, 1, 1)
        return torch.max(x, threshold)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class FRN(nn.Module):
    """Filter Response Normalization layer.

    Normalizes each channel by the mean squared activation over the spatial
    dims, then applies a learnable per-channel scale (weight = gamma) and shift
    (bias = beta), both of shape [1, C, 1, 1] when broadcast ([1, 1, 1, C] in
    the TensorFlow layout). ``eps`` may be a fixed buffer or a learnable scalar.

    NOTE: the ``is_eps_leanable`` parameter name (sic) is kept as-is for
    backward compatibility with existing callers.
    """

    def __init__(self, num_features, eps=1e-6, is_eps_leanable=False):
        super().__init__()
        self.num_features = num_features
        self.init_eps = eps
        self.is_eps_leanable = is_eps_leanable

        self.weight = Parameter(torch.Tensor(num_features))
        self.bias = Parameter(torch.Tensor(num_features))
        if is_eps_leanable:
            self.eps = Parameter(torch.Tensor(1))
        else:
            # Fixed eps travels with the module as a (non-trainable) buffer.
            self.register_buffer('eps', torch.Tensor([eps]))
        self.reset_parameters()

    def reset_parameters(self):
        # gamma = 1, beta = 0 -> identity affine at init.
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)
        if self.is_eps_leanable:
            nn.init.constant_(self.eps, self.init_eps)

    def extra_repr(self):
        return 'num_features={num_features}, eps={init_eps}'.format(**self.__dict__)

    def forward(self, x):
        """Apply FRN over a (B, C, H, W) input.

        Equivalent TensorFlow ((B, H, W, C) layout):
            nu2 = tf.reduce_mean(tf.square(x), axis=[1, 2], keepdims=True)
            x = x * tf.rsqrt(nu2 + tf.abs(eps))
            return tf.maximum(gamma * x + beta, tau)   # TLU applied separately here
        """
        # Mean norm of activations per channel -> shape (B, C, 1, 1).
        nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)
        # abs() keeps the learnable eps positive under the sqrt.
        normalized = x * torch.rsqrt(nu2 + self.eps.abs())
        # Per-channel scale and bias.
        gamma = self.weight.view(1, self.num_features, 1, 1)
        beta = self.bias.view(1, self.num_features, 1, 1)
        return gamma * normalized + beta
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def bnrelu_to_frn(module):
    """
    Convert 'BatchNorm2d + ReLU' to 'FRN + TLU'

    Walks the immediate children; whenever a ReLU/LeakyReLU directly
    follows a BatchNorm2d, the BN is replaced by an FRN (same
    num_features) and the activation by a TLU.  Other children are
    converted recursively.  Returns the (mutated) module.

    NOTE(review): replacement happens via add_module while iterating
    named_children(); this relies on add_module overwriting an existing
    key in place rather than appending — confirm before reordering.
    """
    mod = module
    # Track the previous child so a BN->activation pair can be detected.
    before_name = None
    before_child = None
    is_before_bn = False

    for name, child in module.named_children():
        if is_before_bn and isinstance(child, (ReLU, LeakyReLU)):
            # Convert BN to FRN
            if isinstance(before_child, BatchNorm2d):
                mod.add_module(
                    before_name, FRN(num_features=before_child.num_features))
            else:
                # is_before_bn guaranteed a BatchNorm2d, so this is unreachable
                # unless the bookkeeping above is broken.
                raise NotImplementedError()

            # Convert ReLU to TLU
            mod.add_module(name, TLU(num_features=before_child.num_features))
        else:
            # Not a BN->activation boundary: recurse into the child.
            mod.add_module(name, bnrelu_to_frn(child))

        before_name = name
        before_child = child
        is_before_bn = isinstance(child, BatchNorm2d)
    return mod
|
|
114
|
+
|
|
115
|
+
def convert(module, flag_name):
    """Recursively replace flagged BatchNorm2d children with FRN and
    flagged ReLU/LeakyReLU children with TLU.

    ``flag_name`` is the boolean attribute set by the hook pass (see
    bnrelu_to_frn2) marking modules that should be converted.  Unflagged
    children are descended into recursively.  Returns the mutated module.
    """
    mod = module
    # Channel count of the most recently converted BN, used to size the TLU.
    before_ch = None
    for name, child in module.named_children():
        if hasattr(child, flag_name) and getattr(child, flag_name):
            if isinstance(child, BatchNorm2d):
                before_ch = child.num_features
                mod.add_module(name, FRN(num_features=child.num_features))
            # TODO bn is no good...
            if isinstance(child, (ReLU, LeakyReLU)):
                # NOTE(review): before_ch is still None if no flagged BN
                # preceded this activation in the same container —
                # TLU(num_features=None) would then fail.  Presumably the
                # flagging pass guarantees BN-before-activation; confirm.
                mod.add_module(name, TLU(num_features=before_ch))
        else:
            mod.add_module(name, convert(child, flag_name))
    return mod
|
|
130
|
+
|
|
131
|
+
def remove_flags(module, flag_name):
    """Recursively strip the conversion marker attribute ``flag_name``
    from ``module`` and every descendant.

    Returns the (mutated) module, mirroring the convert() helper's style.

    Bug fix: the original tested ``hasattr(child, 'is_convert_frn')``
    (the hard-coded default) instead of ``flag_name``, so any custom flag
    name was never removed — and if a child carried the default flag but
    not ``flag_name``, ``delattr`` raised AttributeError.  Both branches
    also performed the identical recursion, so they are merged.
    """
    mod = module
    for name, child in module.named_children():
        if hasattr(child, flag_name):
            delattr(child, flag_name)
        mod.add_module(name, remove_flags(child, flag_name))
    return mod
|
|
141
|
+
|
|
142
|
+
def bnrelu_to_frn2(model, input_size=(3, 128, 128), batch_size=2, flag_name='is_convert_frn'):
    """Convert BN+ReLU pairs to FRN+TLU by tracing real execution order.

    Runs one forward and one backward pass with temporary hooks that mark
    (via ``flag_name``) each BatchNorm2d whose *runtime* successor is a
    ReLU/LeakyReLU and each activation whose predecessor is a BN, then
    rewrites the marked modules with convert() and strips the marks with
    remove_flags().  Returns the converted model.
    """
    forard_hooks = list()  # (sic) handles of registered forward hooks
    backward_hooks = list()

    # Shared one-element-at-a-time history: last entry says whether the
    # previously executed leaf module was a BatchNorm2d.
    is_before_bn = [False]

    def register_forward_hook(module):
        def hook(self, input, output):
            # Containers and the root fire after their children; reset the
            # history so a pair is never detected across a container edge.
            if isinstance(module, (nn.Sequential, nn.ModuleList)) or (module == model):
                is_before_bn.append(False)
                return

            # input and output is required in hook def
            # Forward order: BN ran just before this activation -> flag it.
            is_converted = is_before_bn[-1] and isinstance(self, (ReLU, LeakyReLU))
            if is_converted:
                setattr(self, flag_name, True)
            is_before_bn.append(isinstance(self, BatchNorm2d))

        forard_hooks.append(module.register_forward_hook(hook))

    # Same idea in reverse: backward visits the activation before its BN.
    is_before_relu = [False]

    def register_backward_hook(module):
        def hook(self, input, output):
            if isinstance(module, (nn.Sequential, nn.ModuleList)) or (module == model):
                is_before_relu.append(False)
                return
            is_converted = is_before_relu[-1] and isinstance(self, BatchNorm2d)
            if is_converted:
                setattr(self, flag_name, True)
            is_before_relu.append(isinstance(self, (ReLU, LeakyReLU)))

        backward_hooks.append(module.register_backward_hook(hook))

    # multiple inputs to the network
    if isinstance(input_size, tuple):
        input_size = [input_size]

    # batch_size of 2 for batchnorm
    x = [torch.rand(batch_size, *in_size) for in_size in input_size]

    # register hook
    model.apply(register_forward_hook)
    model.apply(register_backward_hook)

    # make a forward pass
    output = model(*x)
    output.sum().backward()  # Raw output is not enabled to use backward()

    # remove these hooks
    for h in forard_hooks:
        h.remove()
    for h in backward_hooks:
        h.remove()

    model = convert(model, flag_name=flag_name)
    model = remove_flags(model, flag_name=flag_name)
    return model
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# encoding: utf-8
|
|
2
|
+
"""
|
|
3
|
+
@author: xingyu liao
|
|
4
|
+
@contact: sherlockliao01@gmail.com
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# based on: https://github.com/open-mmlab/OpenSelfSup/blob/master/openselfsup/models/utils/gather_layer.py
|
|
8
|
+
|
|
9
|
+
import torch
|
|
10
|
+
import torch.distributed as dist
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class GatherLayer(torch.autograd.Function):
    """Gather tensors from all process, supporting backward propagation.

    ``dist.all_gather`` alone does not propagate gradients; wrapping it in
    an autograd.Function lets each rank receive the gradient slice that
    corresponds to its own contribution.
    """

    @staticmethod
    def forward(ctx, input):
        # Keep the local tensor so backward() can size grad_out.
        ctx.save_for_backward(input)
        # One buffer per rank; all_gather fills each with that rank's tensor.
        output = [torch.zeros_like(input) \
                  for _ in range(dist.get_world_size())]
        dist.all_gather(output, input)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        input, = ctx.saved_tensors
        grad_out = torch.zeros_like(input)
        # Each rank keeps only the gradient matching its own forward input.
        grad_out[:] = grads[dist.get_rank()]
        return grad_out
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
""" Layer/Module Helpers
|
|
2
|
+
Hacked together by / Copyright 2020 Ross Wightman
|
|
3
|
+
"""
|
|
4
|
+
import collections.abc
|
|
5
|
+
from itertools import repeat
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# From PyTorch internals
|
|
9
|
+
def _ntuple(n):
|
|
10
|
+
def parse(x):
|
|
11
|
+
if isinstance(x, collections.abc.Iterable):
|
|
12
|
+
return x
|
|
13
|
+
return tuple(repeat(x, n))
|
|
14
|
+
|
|
15
|
+
return parse
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# Common fixed-arity converters, e.g. to_2tuple(3) -> (3, 3).
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def make_divisible(v, divisor=8, min_value=None):
    """Round ``v`` to the nearest multiple of ``divisor``.

    The result is at least ``min_value`` (defaulting to ``divisor``) and
    never more than 10% below ``v``.
    """
    if not min_value:
        min_value = divisor
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(min_value, rounded)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
# encoding: utf-8
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
import torch
|
|
5
|
+
from torch import nn
|
|
6
|
+
from .batch_norm import get_norm
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Non_local(nn.Module):
    """Non-local attention block: every position attends to every other
    position via an embedded-Gaussian-style dot product, and the result is
    added residually to the input.
    """

    def __init__(self, in_channels, bn_norm, reduc_ratio=2):
        super(Non_local, self).__init__()

        self.in_channels = in_channels
        # NOTE(review): this is always 1 for any reduc_ratio; the usual
        # bottleneck formula is in_channels // reduc_ratio.  It matches
        # upstream fast-reid, and changing it would alter weight shapes
        # (breaking pretrained checkpoints) — confirm intent before fixing.
        self.inter_channels = reduc_ratio // reduc_ratio

        # g: value projection into the reduced channel space.
        self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

        # W: project back to in_channels, followed by a norm layer.
        self.W = nn.Sequential(
            nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels,
                      kernel_size=1, stride=1, padding=0),
            get_norm(bn_norm, self.in_channels),
        )
        # Zero-init the norm so the block starts as an identity mapping
        # (the residual branch contributes nothing at initialization).
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)

        # theta / phi: query and key projections.
        self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
                               kernel_size=1, stride=1, padding=0)

        self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """
        :param x: (b, t, h, w)
        :return x: (b, t, h, w)
        """
        batch_size = x.size(0)
        # Flatten spatial dims: (b, inter, h*w) -> (b, h*w, inter).
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        # Pairwise affinities between all positions: (b, h*w, h*w).
        f = torch.matmul(theta_x, phi_x)
        N = f.size(-1)
        # Normalize by the number of positions instead of a softmax.
        f_div_C = f / N

        # Attention-weighted sum of values, reshaped back to (b, inter, h, w).
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection.
        z = W_y + x
        return z
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
# encoding: utf-8
|
|
2
|
+
"""
|
|
3
|
+
@author: l1aoxingyu
|
|
4
|
+
@contact: sherlockliao01@gmail.com
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import torch
|
|
8
|
+
import torch.nn.functional as F
|
|
9
|
+
from torch import nn
|
|
10
|
+
|
|
11
|
+
# Public pooling layers exported by this module.
__all__ = [
    'Identity',
    'Flatten',
    'GlobalAvgPool',
    'GlobalMaxPool',
    'GeneralizedMeanPooling',
    'GeneralizedMeanPoolingP',
    'FastGlobalAvgPool',
    'AdaptiveAvgMaxPool',
    'ClipGlobalAvgPool',
]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class Identity(nn.Module):
    """No-op module: forward returns its input untouched.

    Extra constructor args are accepted (and ignored) so it can stand in
    for any pooling layer in a factory.
    """

    def __init__(self, *args, **kwargs):
        super(Identity, self).__init__()

    def forward(self, input):
        return input
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class Flatten(nn.Module):
    """Collapse all trailing dims into the channel dim, keeping a 4-D shape.

    Output is (B, C*H*W, 1, 1) — the singleton spatial dims are kept (it is
    not a plain (B, -1) flatten) so downstream conv/pool layers still work.
    """

    def __init__(self, *args, **kwargs):
        super(Flatten, self).__init__()

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1, 1, 1)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class GlobalAvgPool(nn.AdaptiveAvgPool2d):
    """Adaptive average pooling defaulting to a 1x1 (global) output.

    Extra args are accepted for factory compatibility but ignored.
    """

    def __init__(self, output_size=1, *args, **kwargs):
        super(GlobalAvgPool, self).__init__(output_size)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class GlobalMaxPool(nn.AdaptiveMaxPool2d):
    """Adaptive max pooling defaulting to a 1x1 (global) output.

    Extra args are accepted for factory compatibility but ignored.
    """

    def __init__(self, output_size=1, *args, **kwargs):
        super(GlobalMaxPool, self).__init__(output_size)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class GeneralizedMeanPooling(nn.Module):
    r"""Generalized-mean (GeM) pooling over an input of several planes.

    Computes :math:`f(X) = pow(mean(pow(X, p)), 1/p)` per channel:
    - p -> infinity approaches max pooling
    - p = 1 is plain average pooling

    Args:
        norm: the fixed exponent p (must be > 0).
        output_size: target output size H x W; a tuple (H, W) or a single
            int for a square output. H/W may be ``None`` to keep the input
            size on that axis.
        eps: inputs are clamped to at least eps before the power, for
            numerical stability.
    """

    def __init__(self, norm=3, output_size=(1, 1), eps=1e-6, *args, **kwargs):
        super(GeneralizedMeanPooling, self).__init__()
        assert norm > 0
        self.p = float(norm)
        self.output_size = output_size
        self.eps = eps

    def forward(self, x):
        powered = x.clamp(min=self.eps).pow(self.p)
        pooled = F.adaptive_avg_pool2d(powered, self.output_size)
        return pooled.pow(1. / self.p)

    def __repr__(self):
        return '{}({}, output_size={})'.format(
            self.__class__.__name__, self.p, self.output_size)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class GeneralizedMeanPoolingP(GeneralizedMeanPooling):
    """GeM pooling whose exponent p is trainable."""

    def __init__(self, norm=3, output_size=(1, 1), eps=1e-6, *args, **kwargs):
        super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps)
        # Replace the fixed float p with a learnable 1-element parameter.
        self.p = nn.Parameter(torch.ones(1) * norm)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class AdaptiveAvgMaxPool(nn.Module):
    """Elementwise sum of global average pooling and global max pooling."""

    def __init__(self, output_size=1, *args, **kwargs):
        super(AdaptiveAvgMaxPool, self).__init__()
        self.gap = FastGlobalAvgPool()
        self.gmp = GlobalMaxPool(output_size)

    def forward(self, x):
        return self.gap(x) + self.gmp(x)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class FastGlobalAvgPool(nn.Module):
    """Global average pooling via a flattened mean (faster than adaptive pool).

    Returns (B, C) when ``flatten`` is True, otherwise (B, C, 1, 1).
    """

    def __init__(self, flatten=False, *args, **kwargs):
        super(FastGlobalAvgPool, self).__init__()
        self.flatten = flatten

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        flat_mean = x.view(b, c, -1).mean(dim=2)
        if self.flatten:
            return flat_mean
        return flat_mean.view(b, c, 1, 1)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class ClipGlobalAvgPool(nn.Module):
    """Global average pooling with the result clamped into [0, 1]."""

    def __init__(self, *args, **kwargs):
        super(ClipGlobalAvgPool, self).__init__()
        self.avgpool = FastGlobalAvgPool()

    def forward(self, x):
        pooled = self.avgpool(x)
        return pooled.clamp(min=0.0, max=1.0)
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# encoding: utf-8
|
|
2
|
+
"""
|
|
3
|
+
@author: liaoxingyu
|
|
4
|
+
@contact: sherlockliao01@gmail.com
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from torch import nn
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class SELayer(nn.Module):
    """Squeeze-and-Excitation: rescale channels by globally pooled gates.

    Args:
        channel: number of input (and output) channels.
        reduction: bottleneck ratio of the two-layer gating MLP.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        reduced = int(channel / reduction)
        self.fc = nn.Sequential(
            nn.Linear(channel, reduced, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(reduced, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        # Squeeze: global average per channel -> (B, C).
        squeezed = self.avg_pool(x).view(b, c)
        # Excite: per-channel gates in (0, 1) -> (B, C, 1, 1).
        gates = self.fc(squeezed).view(b, c, 1, 1)
        return x * gates.expand_as(x)
|