dnt 0.2.4__py3-none-any.whl → 0.3.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dnt/__init__.py +3 -2
- dnt/analysis/__init__.py +3 -2
- dnt/analysis/count.py +54 -37
- dnt/analysis/interaction2.py +518 -0
- dnt/analysis/stop.py +22 -17
- dnt/analysis/stop2.py +289 -0
- dnt/analysis/stop3.py +758 -0
- dnt/detect/signal/detector.py +326 -0
- dnt/detect/timestamp.py +105 -0
- dnt/detect/yolov8/detector.py +179 -36
- dnt/detect/yolov8/segmentor.py +60 -2
- dnt/engine/__init__.py +8 -0
- dnt/engine/bbox_interp.py +83 -0
- dnt/engine/bbox_iou.py +20 -0
- dnt/engine/cluster.py +31 -0
- dnt/engine/iob.py +66 -0
- dnt/filter/filter.py +333 -2
- dnt/label/labeler.py +4 -4
- dnt/label/labeler2.py +631 -0
- dnt/shared/__init__.py +2 -1
- dnt/shared/data/coco.names +0 -0
- dnt/shared/data/openimages.names +0 -0
- dnt/shared/data/voc.names +0 -0
- dnt/shared/download.py +12 -0
- dnt/shared/synhcro.py +150 -0
- dnt/shared/util.py +17 -4
- dnt/third_party/fast-reid/__init__.py +1 -0
- dnt/third_party/fast-reid/configs/Base-AGW.yml +19 -0
- dnt/third_party/fast-reid/configs/Base-MGN.yml +12 -0
- dnt/third_party/fast-reid/configs/Base-SBS.yml +63 -0
- dnt/third_party/fast-reid/configs/Base-bagtricks.yml +76 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R101-ibn.yml +13 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R50-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_S50.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_vit.yml +88 -0
- dnt/third_party/fast-reid/configs/Market1501/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/VERIWild/bagtricks_R50-ibn.yml +35 -0
- dnt/third_party/fast-reid/configs/VeRi/sbs_R50-ibn.yml +35 -0
- dnt/third_party/fast-reid/configs/VehicleID/bagtricks_R50-ibn.yml +36 -0
- dnt/third_party/fast-reid/configs/__init__.py +0 -0
- dnt/third_party/fast-reid/fast_reid_interfece.py +175 -0
- dnt/third_party/fast-reid/fastreid/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/config/__init__.py +15 -0
- dnt/third_party/fast-reid/fastreid/config/config.py +319 -0
- dnt/third_party/fast-reid/fastreid/config/defaults.py +329 -0
- dnt/third_party/fast-reid/fastreid/data/__init__.py +17 -0
- dnt/third_party/fast-reid/fastreid/data/build.py +194 -0
- dnt/third_party/fast-reid/fastreid/data/common.py +58 -0
- dnt/third_party/fast-reid/fastreid/data/data_utils.py +202 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/AirportALERT.py +50 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/__init__.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/bases.py +183 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/caviara.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/cuhk03.py +274 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/cuhk_sysu.py +58 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/dukemtmcreid.py +70 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/grid.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/iLIDS.py +45 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/lpw.py +49 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/market1501.py +89 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/msmt17.py +114 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/pes3d.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/pku.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/prai.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/prid.py +41 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/saivt.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/sensereid.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/shinpuhkan.py +48 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/sysu_mm.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/thermalworld.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/vehicleid.py +126 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/veri.py +69 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/veriwild.py +140 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/viper.py +45 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/wildtracker.py +59 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/__init__.py +18 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/data_sampler.py +85 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/imbalance_sampler.py +67 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/triplet_sampler.py +260 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/__init__.py +11 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/autoaugment.py +806 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/build.py +100 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/functional.py +180 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/transforms.py +161 -0
- dnt/third_party/fast-reid/fastreid/engine/__init__.py +15 -0
- dnt/third_party/fast-reid/fastreid/engine/defaults.py +490 -0
- dnt/third_party/fast-reid/fastreid/engine/hooks.py +534 -0
- dnt/third_party/fast-reid/fastreid/engine/launch.py +103 -0
- dnt/third_party/fast-reid/fastreid/engine/train_loop.py +357 -0
- dnt/third_party/fast-reid/fastreid/evaluation/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/evaluation/clas_evaluator.py +81 -0
- dnt/third_party/fast-reid/fastreid/evaluation/evaluator.py +176 -0
- dnt/third_party/fast-reid/fastreid/evaluation/query_expansion.py +46 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank.py +200 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/__init__.py +20 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/setup.py +32 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/test_cython.py +106 -0
- dnt/third_party/fast-reid/fastreid/evaluation/reid_evaluation.py +143 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rerank.py +73 -0
- dnt/third_party/fast-reid/fastreid/evaluation/roc.py +90 -0
- dnt/third_party/fast-reid/fastreid/evaluation/testing.py +88 -0
- dnt/third_party/fast-reid/fastreid/layers/__init__.py +19 -0
- dnt/third_party/fast-reid/fastreid/layers/activation.py +59 -0
- dnt/third_party/fast-reid/fastreid/layers/any_softmax.py +80 -0
- dnt/third_party/fast-reid/fastreid/layers/batch_norm.py +205 -0
- dnt/third_party/fast-reid/fastreid/layers/context_block.py +113 -0
- dnt/third_party/fast-reid/fastreid/layers/drop.py +161 -0
- dnt/third_party/fast-reid/fastreid/layers/frn.py +199 -0
- dnt/third_party/fast-reid/fastreid/layers/gather_layer.py +30 -0
- dnt/third_party/fast-reid/fastreid/layers/helpers.py +31 -0
- dnt/third_party/fast-reid/fastreid/layers/non_local.py +54 -0
- dnt/third_party/fast-reid/fastreid/layers/pooling.py +124 -0
- dnt/third_party/fast-reid/fastreid/layers/se_layer.py +25 -0
- dnt/third_party/fast-reid/fastreid/layers/splat.py +109 -0
- dnt/third_party/fast-reid/fastreid/layers/weight_init.py +122 -0
- dnt/third_party/fast-reid/fastreid/modeling/__init__.py +23 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/__init__.py +18 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/build.py +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/mobilenet.py +195 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/mobilenetv3.py +283 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/osnet.py +525 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/__init__.py +4 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/config.py +396 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B0_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B1_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B2_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B3_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B4_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B5_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet.py +281 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnet.py +596 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-1.6GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-12GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-16GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-200MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-3.2GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-32GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-4.0GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-400MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-6.4GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-600MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-8.0GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-800MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-1.6GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-12GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-16GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-200MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-3.2GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-32GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-4.0GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-400MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-6.4GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-600MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-8.0GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-800MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/repvgg.py +309 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnest.py +365 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnet.py +364 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnext.py +335 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/shufflenet.py +203 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/vision_transformer.py +399 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/__init__.py +11 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/build.py +25 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/clas_head.py +36 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/embedding_head.py +151 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/__init__.py +12 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/circle_loss.py +71 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/cross_entroy_loss.py +54 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/focal_loss.py +92 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/triplet_loss.py +113 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/utils.py +48 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/__init__.py +14 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/baseline.py +188 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/build.py +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/distiller.py +140 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/mgn.py +394 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/moco.py +126 -0
- dnt/third_party/fast-reid/fastreid/solver/__init__.py +8 -0
- dnt/third_party/fast-reid/fastreid/solver/build.py +348 -0
- dnt/third_party/fast-reid/fastreid/solver/lr_scheduler.py +66 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/__init__.py +10 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/lamb.py +123 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/radam.py +149 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/swa.py +246 -0
- dnt/third_party/fast-reid/fastreid/utils/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/utils/checkpoint.py +503 -0
- dnt/third_party/fast-reid/fastreid/utils/collect_env.py +158 -0
- dnt/third_party/fast-reid/fastreid/utils/comm.py +255 -0
- dnt/third_party/fast-reid/fastreid/utils/compute_dist.py +200 -0
- dnt/third_party/fast-reid/fastreid/utils/env.py +119 -0
- dnt/third_party/fast-reid/fastreid/utils/events.py +461 -0
- dnt/third_party/fast-reid/fastreid/utils/faiss_utils.py +127 -0
- dnt/third_party/fast-reid/fastreid/utils/file_io.py +520 -0
- dnt/third_party/fast-reid/fastreid/utils/history_buffer.py +71 -0
- dnt/third_party/fast-reid/fastreid/utils/logger.py +211 -0
- dnt/third_party/fast-reid/fastreid/utils/params.py +103 -0
- dnt/third_party/fast-reid/fastreid/utils/precision_bn.py +94 -0
- dnt/third_party/fast-reid/fastreid/utils/registry.py +66 -0
- dnt/third_party/fast-reid/fastreid/utils/summary.py +120 -0
- dnt/third_party/fast-reid/fastreid/utils/timer.py +68 -0
- dnt/third_party/fast-reid/fastreid/utils/visualizer.py +278 -0
- dnt/track/__init__.py +2 -0
- dnt/track/botsort/__init__.py +4 -0
- dnt/track/botsort/bot_tracker/__init__.py +3 -0
- dnt/track/botsort/bot_tracker/basetrack.py +60 -0
- dnt/track/botsort/bot_tracker/bot_sort.py +473 -0
- dnt/track/botsort/bot_tracker/gmc.py +316 -0
- dnt/track/botsort/bot_tracker/kalman_filter.py +269 -0
- dnt/track/botsort/bot_tracker/matching.py +194 -0
- dnt/track/botsort/bot_tracker/mc_bot_sort.py +505 -0
- dnt/track/{dsort/utils → botsort/bot_tracker/tracking_utils}/evaluation.py +14 -4
- dnt/track/{dsort/utils → botsort/bot_tracker/tracking_utils}/io.py +19 -36
- dnt/track/botsort/bot_tracker/tracking_utils/timer.py +37 -0
- dnt/track/botsort/inference.py +96 -0
- dnt/track/config.py +120 -0
- dnt/track/dsort/configs/bagtricks_R50.yml +7 -0
- dnt/track/dsort/configs/deep_sort.yaml +0 -0
- dnt/track/dsort/configs/fastreid.yaml +1 -1
- dnt/track/dsort/deep_sort/deep/checkpoint/ckpt.t7 +0 -0
- dnt/track/dsort/deep_sort/deep/feature_extractor.py +87 -8
- dnt/track/dsort/deep_sort/deep_sort.py +31 -20
- dnt/track/dsort/deep_sort/sort/detection.py +2 -1
- dnt/track/dsort/deep_sort/sort/iou_matching.py +0 -2
- dnt/track/dsort/deep_sort/sort/linear_assignment.py +0 -3
- dnt/track/dsort/deep_sort/sort/nn_matching.py +5 -5
- dnt/track/dsort/deep_sort/sort/preprocessing.py +1 -2
- dnt/track/dsort/deep_sort/sort/track.py +2 -1
- dnt/track/dsort/deep_sort/sort/tracker.py +1 -1
- dnt/track/dsort/dsort.py +43 -33
- dnt/track/re_class.py +117 -0
- dnt/track/sort/sort.py +9 -6
- dnt/track/tracker.py +213 -32
- dnt-0.3.1.8.dist-info/METADATA +117 -0
- dnt-0.3.1.8.dist-info/RECORD +315 -0
- {dnt-0.2.4.dist-info → dnt-0.3.1.8.dist-info}/WHEEL +1 -1
- dnt/analysis/yield.py +0 -9
- dnt/track/dsort/deep_sort/deep/evaluate.py +0 -15
- dnt/track/dsort/deep_sort/deep/original_model.py +0 -106
- dnt/track/dsort/deep_sort/deep/test.py +0 -77
- dnt/track/dsort/deep_sort/deep/train.py +0 -189
- dnt/track/dsort/utils/asserts.py +0 -13
- dnt/track/dsort/utils/draw.py +0 -36
- dnt/track/dsort/utils/json_logger.py +0 -383
- dnt/track/dsort/utils/log.py +0 -17
- dnt/track/dsort/utils/parser.py +0 -35
- dnt/track/dsort/utils/tools.py +0 -39
- dnt-0.2.4.dist-info/METADATA +0 -35
- dnt-0.2.4.dist-info/RECORD +0 -64
- /dnt/{track/dsort/utils → third_party/fast-reid/checkpoint}/__init__.py +0 -0
- {dnt-0.2.4.dist-info → dnt-0.3.1.8.dist-info/licenses}/LICENSE +0 -0
- {dnt-0.2.4.dist-info → dnt-0.3.1.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,534 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
|
3
|
+
|
|
4
|
+
import datetime
|
|
5
|
+
import itertools
|
|
6
|
+
import logging
|
|
7
|
+
import os
|
|
8
|
+
import tempfile
|
|
9
|
+
import time
|
|
10
|
+
from collections import Counter
|
|
11
|
+
|
|
12
|
+
import torch
|
|
13
|
+
from torch import nn
|
|
14
|
+
from torch.nn.parallel import DistributedDataParallel
|
|
15
|
+
|
|
16
|
+
from fastreid.evaluation.testing import flatten_results_dict
|
|
17
|
+
from fastreid.solver import optim
|
|
18
|
+
from fastreid.utils import comm
|
|
19
|
+
from fastreid.utils.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
|
|
20
|
+
from fastreid.utils.events import EventStorage, EventWriter, get_event_storage
|
|
21
|
+
from fastreid.utils.file_io import PathManager
|
|
22
|
+
from fastreid.utils.precision_bn import update_bn_stats, get_bn_modules
|
|
23
|
+
from fastreid.utils.timer import Timer
|
|
24
|
+
from .train_loop import HookBase
|
|
25
|
+
|
|
26
|
+
__all__ = [
|
|
27
|
+
"CallbackHook",
|
|
28
|
+
"IterationTimer",
|
|
29
|
+
"PeriodicWriter",
|
|
30
|
+
"PeriodicCheckpointer",
|
|
31
|
+
"LRScheduler",
|
|
32
|
+
"AutogradProfiler",
|
|
33
|
+
"EvalHook",
|
|
34
|
+
"PreciseBN",
|
|
35
|
+
"LayerFreeze",
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
"""
|
|
39
|
+
Implement some common hooks.
|
|
40
|
+
"""
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class CallbackHook(HookBase):
|
|
44
|
+
"""
|
|
45
|
+
Create a hook using callback functions provided by the user.
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
def __init__(self, *, before_train=None, after_train=None, before_epoch=None, after_epoch=None,
|
|
49
|
+
before_step=None, after_step=None):
|
|
50
|
+
"""
|
|
51
|
+
Each argument is a function that takes one argument: the trainer.
|
|
52
|
+
"""
|
|
53
|
+
self._before_train = before_train
|
|
54
|
+
self._before_epoch = before_epoch
|
|
55
|
+
self._before_step = before_step
|
|
56
|
+
self._after_step = after_step
|
|
57
|
+
self._after_epoch = after_epoch
|
|
58
|
+
self._after_train = after_train
|
|
59
|
+
|
|
60
|
+
def before_train(self):
|
|
61
|
+
if self._before_train:
|
|
62
|
+
self._before_train(self.trainer)
|
|
63
|
+
|
|
64
|
+
def after_train(self):
|
|
65
|
+
if self._after_train:
|
|
66
|
+
self._after_train(self.trainer)
|
|
67
|
+
# The functions may be closures that hold reference to the trainer
|
|
68
|
+
# Therefore, delete them to avoid circular reference.
|
|
69
|
+
del self._before_train, self._after_train
|
|
70
|
+
del self._before_step, self._after_step
|
|
71
|
+
|
|
72
|
+
def before_epoch(self):
|
|
73
|
+
if self._before_epoch:
|
|
74
|
+
self._before_epoch(self.trainer)
|
|
75
|
+
|
|
76
|
+
def after_epoch(self):
|
|
77
|
+
if self._after_epoch:
|
|
78
|
+
self._after_epoch(self.trainer)
|
|
79
|
+
|
|
80
|
+
def before_step(self):
|
|
81
|
+
if self._before_step:
|
|
82
|
+
self._before_step(self.trainer)
|
|
83
|
+
|
|
84
|
+
def after_step(self):
|
|
85
|
+
if self._after_step:
|
|
86
|
+
self._after_step(self.trainer)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class IterationTimer(HookBase):
|
|
90
|
+
"""
|
|
91
|
+
Track the time spent for each iteration (each run_step call in the trainer).
|
|
92
|
+
Print a summary in the end of training.
|
|
93
|
+
This hook uses the time between the call to its :meth:`before_step`
|
|
94
|
+
and :meth:`after_step` methods.
|
|
95
|
+
Under the convention that :meth:`before_step` of all hooks should only
|
|
96
|
+
take negligible amount of time, the :class:`IterationTimer` hook should be
|
|
97
|
+
placed at the beginning of the list of hooks to obtain accurate timing.
|
|
98
|
+
"""
|
|
99
|
+
|
|
100
|
+
def __init__(self, warmup_iter=3):
|
|
101
|
+
"""
|
|
102
|
+
Args:
|
|
103
|
+
warmup_iter (int): the number of iterations at the beginning to exclude
|
|
104
|
+
from timing.
|
|
105
|
+
"""
|
|
106
|
+
self._warmup_iter = warmup_iter
|
|
107
|
+
self._step_timer = Timer()
|
|
108
|
+
|
|
109
|
+
def before_train(self):
|
|
110
|
+
self._start_time = time.perf_counter()
|
|
111
|
+
self._total_timer = Timer()
|
|
112
|
+
self._total_timer.pause()
|
|
113
|
+
|
|
114
|
+
def after_train(self):
|
|
115
|
+
logger = logging.getLogger(__name__)
|
|
116
|
+
total_time = time.perf_counter() - self._start_time
|
|
117
|
+
total_time_minus_hooks = self._total_timer.seconds()
|
|
118
|
+
hook_time = total_time - total_time_minus_hooks
|
|
119
|
+
|
|
120
|
+
num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
|
|
121
|
+
|
|
122
|
+
if num_iter > 0 and total_time_minus_hooks > 0:
|
|
123
|
+
# Speed is meaningful only after warmup
|
|
124
|
+
# NOTE this format is parsed by grep in some scripts
|
|
125
|
+
logger.info(
|
|
126
|
+
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
|
|
127
|
+
num_iter,
|
|
128
|
+
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
|
|
129
|
+
total_time_minus_hooks / num_iter,
|
|
130
|
+
)
|
|
131
|
+
)
|
|
132
|
+
|
|
133
|
+
logger.info(
|
|
134
|
+
"Total training time: {} ({} on hooks)".format(
|
|
135
|
+
str(datetime.timedelta(seconds=int(total_time))),
|
|
136
|
+
str(datetime.timedelta(seconds=int(hook_time))),
|
|
137
|
+
)
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
def before_step(self):
|
|
141
|
+
self._step_timer.reset()
|
|
142
|
+
self._total_timer.resume()
|
|
143
|
+
|
|
144
|
+
def after_step(self):
|
|
145
|
+
# +1 because we're in after_step
|
|
146
|
+
iter_done = self.trainer.iter - self.trainer.start_iter + 1
|
|
147
|
+
if iter_done >= self._warmup_iter:
|
|
148
|
+
sec = self._step_timer.seconds()
|
|
149
|
+
self.trainer.storage.put_scalars(time=sec)
|
|
150
|
+
else:
|
|
151
|
+
self._start_time = time.perf_counter()
|
|
152
|
+
self._total_timer.reset()
|
|
153
|
+
|
|
154
|
+
self._total_timer.pause()
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
class PeriodicWriter(HookBase):
|
|
158
|
+
"""
|
|
159
|
+
Write events to EventStorage periodically.
|
|
160
|
+
It is executed every ``period`` iterations and after the last iteration.
|
|
161
|
+
"""
|
|
162
|
+
|
|
163
|
+
def __init__(self, writers, period=20):
|
|
164
|
+
"""
|
|
165
|
+
Args:
|
|
166
|
+
writers (list[EventWriter]): a list of EventWriter objects
|
|
167
|
+
period (int):
|
|
168
|
+
"""
|
|
169
|
+
self._writers = writers
|
|
170
|
+
for w in writers:
|
|
171
|
+
assert isinstance(w, EventWriter), w
|
|
172
|
+
self._period = period
|
|
173
|
+
|
|
174
|
+
def after_step(self):
|
|
175
|
+
if (self.trainer.iter + 1) % self._period == 0 or (
|
|
176
|
+
self.trainer.iter == self.trainer.max_iter - 1
|
|
177
|
+
):
|
|
178
|
+
for writer in self._writers:
|
|
179
|
+
writer.write()
|
|
180
|
+
|
|
181
|
+
def after_epoch(self):
|
|
182
|
+
for writer in self._writers:
|
|
183
|
+
writer.write()
|
|
184
|
+
|
|
185
|
+
def after_train(self):
|
|
186
|
+
for writer in self._writers:
|
|
187
|
+
writer.close()
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
|
|
191
|
+
"""
|
|
192
|
+
Same as :class:`fastreid.utils.checkpoint.PeriodicCheckpointer`, but as a hook.
|
|
193
|
+
Note that when used as a hook,
|
|
194
|
+
it is unable to save additional data other than what's defined
|
|
195
|
+
by the given `checkpointer`.
|
|
196
|
+
It is executed every ``period`` iterations and after the last iteration.
|
|
197
|
+
"""
|
|
198
|
+
|
|
199
|
+
def before_train(self):
|
|
200
|
+
self.max_epoch = self.trainer.max_epoch
|
|
201
|
+
if len(self.trainer.cfg.DATASETS.TESTS) == 1:
|
|
202
|
+
self.metric_name = "metric"
|
|
203
|
+
else:
|
|
204
|
+
self.metric_name = self.trainer.cfg.DATASETS.TESTS[0] + "/metric"
|
|
205
|
+
|
|
206
|
+
def after_epoch(self):
|
|
207
|
+
# No way to use **kwargs
|
|
208
|
+
storage = get_event_storage()
|
|
209
|
+
metric_dict = dict(
|
|
210
|
+
metric=storage.latest()[self.metric_name][0] if self.metric_name in storage.latest() else -1
|
|
211
|
+
)
|
|
212
|
+
self.step(self.trainer.epoch, **metric_dict)
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
class LRScheduler(HookBase):
|
|
216
|
+
"""
|
|
217
|
+
A hook which executes a torch builtin LR scheduler and summarizes the LR.
|
|
218
|
+
It is executed after every iteration.
|
|
219
|
+
"""
|
|
220
|
+
|
|
221
|
+
def __init__(self, optimizer, scheduler):
|
|
222
|
+
"""
|
|
223
|
+
Args:
|
|
224
|
+
optimizer (torch.optim.Optimizer):
|
|
225
|
+
scheduler (torch.optim._LRScheduler)
|
|
226
|
+
"""
|
|
227
|
+
self._optimizer = optimizer
|
|
228
|
+
self._scheduler = scheduler
|
|
229
|
+
self._scale = 0
|
|
230
|
+
|
|
231
|
+
# NOTE: some heuristics on what LR to summarize
|
|
232
|
+
# summarize the param group with most parameters
|
|
233
|
+
largest_group = max(len(g["params"]) for g in optimizer.param_groups)
|
|
234
|
+
|
|
235
|
+
if largest_group == 1:
|
|
236
|
+
# If all groups have one parameter,
|
|
237
|
+
# then find the most common initial LR, and use it for summary
|
|
238
|
+
lr_count = Counter([g["lr"] for g in optimizer.param_groups])
|
|
239
|
+
lr = lr_count.most_common()[0][0]
|
|
240
|
+
for i, g in enumerate(optimizer.param_groups):
|
|
241
|
+
if g["lr"] == lr:
|
|
242
|
+
self._best_param_group_id = i
|
|
243
|
+
break
|
|
244
|
+
else:
|
|
245
|
+
for i, g in enumerate(optimizer.param_groups):
|
|
246
|
+
if len(g["params"]) == largest_group:
|
|
247
|
+
self._best_param_group_id = i
|
|
248
|
+
break
|
|
249
|
+
|
|
250
|
+
def before_step(self):
|
|
251
|
+
if self.trainer.grad_scaler is not None:
|
|
252
|
+
self._scale = self.trainer.grad_scaler.get_scale()
|
|
253
|
+
|
|
254
|
+
def after_step(self):
|
|
255
|
+
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
|
|
256
|
+
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
|
|
257
|
+
|
|
258
|
+
next_iter = self.trainer.iter + 1
|
|
259
|
+
if next_iter <= self.trainer.warmup_iters:
|
|
260
|
+
if self.trainer.grad_scaler is None or self._scale == self.trainer.grad_scaler.get_scale():
|
|
261
|
+
self._scheduler["warmup_sched"].step()
|
|
262
|
+
|
|
263
|
+
def after_epoch(self):
|
|
264
|
+
next_iter = self.trainer.iter + 1
|
|
265
|
+
next_epoch = self.trainer.epoch + 1
|
|
266
|
+
if next_iter > self.trainer.warmup_iters and next_epoch > self.trainer.delay_epochs:
|
|
267
|
+
self._scheduler["lr_sched"].step()
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
class AutogradProfiler(HookBase):
    """Hook that wraps selected training steps in ``torch.autograd.profiler.profile``.

    Examples:
    .. code-block:: python

        hooks.AutogradProfiler(
            lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR
        )

    The example above profiles iterations 10~20 and dumps the traces to
    ``OUTPUT_DIR``; the first few iterations are skipped because they are
    typically slower than the rest. The resulting files can be loaded in
    the ``chrome://tracing`` page of the chrome browser.

    Note:
        When used together with NCCL on older versions of GPUs,
        the autograd profiler may cause deadlock because it unnecessarily
        allocates memory on every device it sees. Those memory management
        calls, if interleaved with NCCL calls, lead to deadlock on GPUs
        that do not support `cudaLaunchCooperativeKernelMultiDevice`.
    """

    def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
        """
        Args:
            enable_predicate (callable[trainer -> bool]): called once every
                step with the trainer; returning True enables profiling for
                that step, so it can be used to select which steps to profile.
            output_dir (str): the output directory to dump tracing files.
            use_cuda (bool): same as in `torch.autograd.profiler.profile`.
        """
        self._enable_predicate = enable_predicate
        self._output_dir = output_dir
        self._use_cuda = use_cuda

    def before_step(self):
        # Only open a profiler context for steps the predicate selects;
        # after_step uses None as the "not profiling" marker.
        if not self._enable_predicate(self.trainer):
            self._profiler = None
            return
        self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
        self._profiler.__enter__()

    def after_step(self):
        if self._profiler is None:
            return
        self._profiler.__exit__(None, None, None)
        out_file = os.path.join(
            self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
        )
        if "://" not in out_file:
            # Plain local path: export directly.
            self._profiler.export_chrome_trace(out_file)
        else:
            # Support non-posix filesystems: export to a temporary local
            # file, then copy the bytes through PathManager.
            with tempfile.TemporaryDirectory(prefix="fastreid_profiler") as d:
                tmp_file = os.path.join(d, "tmp.json")
                self._profiler.export_chrome_trace(tmp_file)
                with open(tmp_file) as f:
                    content = f.read()
                with PathManager.open(out_file, "w") as f:
                    f.write(content)
|
|
328
|
+
|
|
329
|
+
|
|
330
|
+
class EvalHook(HookBase):
    """
    Run an evaluation function periodically, and at the end of training.

    It is executed every ``eval_period`` epochs (see ``after_epoch``) and
    after the last epoch.
    """

    def __init__(self, eval_period, eval_function):
        """
        Args:
            eval_period (int): the period, in epochs, to run `eval_function`.
                Set to 0 to evaluate only after the last epoch.
            eval_function (callable): a function which takes no arguments, and
                returns a nested dict of evaluation metrics.

        Note:
            This hook must be enabled in all or none workers.
            If you would like only certain workers to perform evaluation,
            give other workers a no-op function (`eval_function=lambda: None`).
        """
        self._period = eval_period
        self._func = eval_function

    def _do_eval(self):
        """Run the eval function, validate/flatten its metrics, and sync workers."""
        results = self._func()

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(results)

            flattened_results = flatten_results_dict(results)
            # Every leaf metric must be float-convertible before logging.
            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v)
                    )
            self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)

        torch.cuda.empty_cache()
        # Evaluation may take different time among workers.
        # A barrier make them start the next iteration together.
        comm.synchronize()

    def after_epoch(self):
        next_epoch = self.trainer.epoch + 1
        if self._period > 0 and next_epoch % self._period == 0:
            self._do_eval()

    def after_train(self):
        next_epoch = self.trainer.epoch + 1
        # The >= max_epoch check prevents the eval from running after a
        # failed (interrupted) training. Skip the final eval only when
        # after_epoch already ran it on this epoch; guard _period == 0,
        # which would otherwise raise ZeroDivisionError here even though
        # after_epoch explicitly supports it.
        if next_epoch >= self.trainer.max_epoch and (
            self._period == 0 or next_epoch % self._period != 0
        ):
            self._do_eval()
        # func is likely a closure that holds reference to the trainer
        # therefore we clean it to avoid circular reference in the end
        del self._func
|
|
387
|
+
|
|
388
|
+
|
|
389
|
+
class PreciseBN(HookBase):
    """Recompute exact BatchNorm statistics after the last iteration.

    The standard BatchNorm implementation uses an EMA of its statistics in
    inference, which is sometimes suboptimal. This hook computes the true
    average of the statistics over ``num_iter`` batches instead of the
    moving average, and writes those true averages into every BN layer of
    the given model. It is executed after the last iteration.
    """

    def __init__(self, model, data_loader, num_iter):
        """
        Args:
            model (nn.Module): a module whose all BN layers in training mode will be
                updated by precise BN.
                Note that user is responsible for ensuring the BN layers to be
                updated are in training mode when this hook is triggered.
            data_loader (iterable): it will produce data to be run by `model(data)`.
            num_iter (int): number of iterations used to compute the precise
                statistics.
        """
        self._logger = logging.getLogger(__name__)
        if len(get_bn_modules(model)) == 0:
            self._logger.info(
                "PreciseBN is disabled because model does not contain BN layers in training mode."
            )
            self._disabled = True
            return

        self._model = model
        self._data_loader = data_loader
        self._num_iter = num_iter
        self._disabled = False
        # Lazily created on first update_stats() so the iterator is shared
        # across calls.
        self._data_iter = None

    def after_epoch(self):
        # Run once, right after the final epoch completes.
        if self.trainer.epoch + 1 == self.trainer.max_epoch:
            self.update_stats()

    def update_stats(self):
        """
        Update the model with precise statistics. Users can manually call this method.
        """
        if self._disabled:
            return

        if self._data_iter is None:
            self._data_iter = iter(self._data_loader)

        def _batches():
            for batch_idx in itertools.count(1):
                if batch_idx % 100 == 0:
                    self._logger.info(
                        "Running precise-BN ... {}/{} iterations.".format(batch_idx, self._num_iter)
                    )
                # This way we can reuse the same iterator
                yield next(self._data_iter)

        with EventStorage():  # capture events in a new storage to discard them
            self._logger.info(
                "Running precise-BN for {} iterations... ".format(self._num_iter)
                + "Note that this could produce different statistics every time."
            )
            update_bn_stats(self._model, _batches(), self._num_iter)
|
|
455
|
+
|
|
456
|
+
|
|
457
|
+
class LayerFreeze(HookBase):
    """Put the named top-level submodules into eval mode for the first
    ``freeze_iters`` iterations, then restore them to train mode.
    """

    def __init__(self, model, freeze_layers, freeze_iters):
        """
        Args:
            model: the model being trained; a DistributedDataParallel wrapper
                is unwrapped to its inner module.
            freeze_layers: names of the model's direct children to freeze.
            freeze_iters: number of iterations to keep them frozen.
        """
        self._logger = logging.getLogger(__name__)
        if isinstance(model, DistributedDataParallel):
            model = model.module
        self.model = model
        self.freeze_layers = freeze_layers
        self.freeze_iters = freeze_iters
        self.is_frozen = False

    def before_step(self):
        # Freeze specific layers
        if self.trainer.iter < self.freeze_iters and not self.is_frozen:
            self.freeze_specific_layer()
        # Recover original layers status (the two conditions are mutually
        # exclusive, so elif is equivalent to the second standalone check)
        elif self.trainer.iter >= self.freeze_iters and self.is_frozen:
            self.open_all_layer()

    def freeze_specific_layer(self):
        """Switch the listed layers to eval mode and mark the hook frozen."""
        missing = (layer for layer in self.freeze_layers if not hasattr(self.model, layer))
        for layer in missing:
            self._logger.info(f'{layer} is not an attribute of the model, will skip this layer')

        for name, child in self.model.named_children():
            if name in self.freeze_layers:
                # Change BN in freeze layers to eval mode
                child.eval()

        self.is_frozen = True
        freeze_layers = ", ".join(self.freeze_layers)
        self._logger.info(f'Freeze layer group "{freeze_layers}" training for {self.freeze_iters:d} iterations')

    def open_all_layer(self):
        """Return the previously frozen layers to train mode."""
        for name, child in self.model.named_children():
            if name in self.freeze_layers:
                child.train()

        self.is_frozen = False

        freeze_layers = ", ".join(self.freeze_layers)
        self._logger.info(f'Open layer group "{freeze_layers}" training')
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
class SWA(HookBase):
    """Stochastic Weight Averaging hook.

    At iteration ``swa_start`` the trainer's optimizer is wrapped with
    ``optim.SWA``; optionally a cosine-annealing warm-restart schedule is
    then stepped each iteration. At the final iteration the averaged
    parameters are swapped into the model.
    """

    def __init__(self, swa_start: int, swa_freq: int, swa_lr_factor: float, eta_min: float, lr_sched=False):
        """
        Args:
            swa_start: iteration at which to switch the optimizer to SWA.
            swa_freq: averaging frequency, also the warm-restart period T_0.
            swa_lr_factor: LR factor passed to the SWA optimizer wrapper.
            eta_min: minimum LR for the cosine schedule.
            lr_sched: whether to drive a CosineAnnealingWarmRestarts schedule
                after ``swa_start``.
        """
        self.swa_start = swa_start
        self.swa_freq = swa_freq
        self.swa_lr_factor = swa_lr_factor
        self.eta_min = eta_min
        self.lr_sched = lr_sched

    def before_step(self):
        if self.trainer.iter == self.swa_start:
            # Wrapper optimizer with SWA
            self.trainer.optimizer = optim.SWA(self.trainer.optimizer, self.swa_freq, self.swa_lr_factor)
            self.trainer.optimizer.reset_lr_to_swa()
            if self.lr_sched:
                self.scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
                    optimizer=self.trainer.optimizer,
                    T_0=self.swa_freq,
                    eta_min=self.eta_min,
                )

    def after_step(self):
        upcoming_iter = self.trainer.iter + 1

        # Use Cyclic learning rate scheduler
        if self.lr_sched and upcoming_iter > self.swa_start:
            self.scheduler.step()

        if upcoming_iter == self.trainer.max_iter:
            # Training ends: install the averaged weights.
            self.trainer.optimizer.swap_swa_param()
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# encoding: utf-8
|
|
2
|
+
"""
|
|
3
|
+
@author: xingyu liao
|
|
4
|
+
@contact: sherlockliao01@gmail.com
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# based on:
|
|
8
|
+
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
import logging
|
|
12
|
+
|
|
13
|
+
import torch
|
|
14
|
+
import torch.distributed as dist
|
|
15
|
+
import torch.multiprocessing as mp
|
|
16
|
+
|
|
17
|
+
from fastreid.utils import comm
|
|
18
|
+
|
|
19
|
+
__all__ = ["launch"]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _find_free_port():
|
|
23
|
+
import socket
|
|
24
|
+
|
|
25
|
+
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
|
26
|
+
# Binding to port 0 will cause the OS to find an available port for us
|
|
27
|
+
sock.bind(("", 0))
|
|
28
|
+
port = sock.getsockname()[1]
|
|
29
|
+
sock.close()
|
|
30
|
+
# NOTE: there is still a chance the port could be taken by other processes.
|
|
31
|
+
return port
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()):
    """
    Launch multi-gpu or distributed training.
    This function must be called on all machines involved in the training.
    It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.

    Args:
        main_func: a function that will be called by `main_func(*args)`
        num_gpus_per_machine (int): number of GPUs per machine
        num_machines (int): the total number of machines
        machine_rank (int): the rank of this machine
        dist_url (str): url to connect to for distributed jobs, including protocol
                       e.g. "tcp://127.0.0.1:8686".
                       Can be set to "auto" to automatically select a free port on localhost
        args (tuple): arguments passed to main_func
    """
    world_size = num_machines * num_gpus_per_machine
    if world_size <= 1:
        # Single process: no spawning, just run in-process.
        main_func(*args)
        return

    # https://github.com/pytorch/pytorch/pull/14391
    # TODO prctl in spawned processes

    if dist_url == "auto":
        assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
        dist_url = f"tcp://127.0.0.1:{_find_free_port()}"
    if num_machines > 1 and dist_url.startswith("file://"):
        logging.getLogger(__name__).warning(
            "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
        )

    mp.spawn(
        _distributed_worker,
        nprocs=num_gpus_per_machine,
        args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args),
        daemon=False,
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _distributed_worker(
    local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args
):
    """Per-GPU worker entry point spawned by ``launch``.

    Initializes the NCCL process group, synchronizes, binds this process to
    its GPU, builds the per-machine local process group, then calls
    ``main_func(*args)``.
    """
    assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
    global_rank = machine_rank * num_gpus_per_machine + local_rank
    try:
        dist.init_process_group(
            backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank
        )
    except Exception as e:
        logging.getLogger(__name__).error("Process group URL: {}".format(dist_url))
        raise e

    # synchronize is needed here to prevent a possible timeout after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    comm.synchronize()

    assert num_gpus_per_machine <= torch.cuda.device_count()
    torch.cuda.set_device(local_rank)

    # Setup the local process group (which contains ranks within the same machine)
    assert comm._LOCAL_PROCESS_GROUP is None
    for machine_idx in range(world_size // num_gpus_per_machine):
        first_rank = machine_idx * num_gpus_per_machine
        member_ranks = list(range(first_rank, first_rank + num_gpus_per_machine))
        group = dist.new_group(member_ranks)
        if machine_idx == machine_rank:
            comm._LOCAL_PROCESS_GROUP = group

    main_func(*args)
|