dnt 0.2.4__py3-none-any.whl → 0.3.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dnt/__init__.py +3 -2
- dnt/analysis/__init__.py +3 -2
- dnt/analysis/count.py +54 -37
- dnt/analysis/interaction2.py +518 -0
- dnt/analysis/stop.py +22 -17
- dnt/analysis/stop2.py +289 -0
- dnt/analysis/stop3.py +758 -0
- dnt/detect/signal/detector.py +326 -0
- dnt/detect/timestamp.py +105 -0
- dnt/detect/yolov8/detector.py +179 -36
- dnt/detect/yolov8/segmentor.py +60 -2
- dnt/engine/__init__.py +8 -0
- dnt/engine/bbox_interp.py +83 -0
- dnt/engine/bbox_iou.py +20 -0
- dnt/engine/cluster.py +31 -0
- dnt/engine/iob.py +66 -0
- dnt/filter/filter.py +333 -2
- dnt/label/labeler.py +4 -4
- dnt/label/labeler2.py +631 -0
- dnt/shared/__init__.py +2 -1
- dnt/shared/data/coco.names +0 -0
- dnt/shared/data/openimages.names +0 -0
- dnt/shared/data/voc.names +0 -0
- dnt/shared/download.py +12 -0
- dnt/shared/synhcro.py +150 -0
- dnt/shared/util.py +17 -4
- dnt/third_party/fast-reid/__init__.py +1 -0
- dnt/third_party/fast-reid/configs/Base-AGW.yml +19 -0
- dnt/third_party/fast-reid/configs/Base-MGN.yml +12 -0
- dnt/third_party/fast-reid/configs/Base-SBS.yml +63 -0
- dnt/third_party/fast-reid/configs/Base-bagtricks.yml +76 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/DukeMTMC/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT17/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MOT20/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R101-ibn.yml +13 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R50-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/bagtricks_S50.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/MSMT17/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/AGW_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/bagtricks_vit.yml +88 -0
- dnt/third_party/fast-reid/configs/Market1501/mgn_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R101-ibn.yml +12 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R50-ibn.yml +11 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_R50.yml +7 -0
- dnt/third_party/fast-reid/configs/Market1501/sbs_S50.yml +11 -0
- dnt/third_party/fast-reid/configs/VERIWild/bagtricks_R50-ibn.yml +35 -0
- dnt/third_party/fast-reid/configs/VeRi/sbs_R50-ibn.yml +35 -0
- dnt/third_party/fast-reid/configs/VehicleID/bagtricks_R50-ibn.yml +36 -0
- dnt/third_party/fast-reid/configs/__init__.py +0 -0
- dnt/third_party/fast-reid/fast_reid_interfece.py +175 -0
- dnt/third_party/fast-reid/fastreid/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/config/__init__.py +15 -0
- dnt/third_party/fast-reid/fastreid/config/config.py +319 -0
- dnt/third_party/fast-reid/fastreid/config/defaults.py +329 -0
- dnt/third_party/fast-reid/fastreid/data/__init__.py +17 -0
- dnt/third_party/fast-reid/fastreid/data/build.py +194 -0
- dnt/third_party/fast-reid/fastreid/data/common.py +58 -0
- dnt/third_party/fast-reid/fastreid/data/data_utils.py +202 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/AirportALERT.py +50 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/__init__.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/bases.py +183 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/caviara.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/cuhk03.py +274 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/cuhk_sysu.py +58 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/dukemtmcreid.py +70 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/grid.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/iLIDS.py +45 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/lpw.py +49 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/market1501.py +89 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/msmt17.py +114 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/pes3d.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/pku.py +44 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/prai.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/prid.py +41 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/saivt.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/sensereid.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/shinpuhkan.py +48 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/sysu_mm.py +47 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/thermalworld.py +43 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/vehicleid.py +126 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/veri.py +69 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/veriwild.py +140 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/viper.py +45 -0
- dnt/third_party/fast-reid/fastreid/data/datasets/wildtracker.py +59 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/__init__.py +18 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/data_sampler.py +85 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/imbalance_sampler.py +67 -0
- dnt/third_party/fast-reid/fastreid/data/samplers/triplet_sampler.py +260 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/__init__.py +11 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/autoaugment.py +806 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/build.py +100 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/functional.py +180 -0
- dnt/third_party/fast-reid/fastreid/data/transforms/transforms.py +161 -0
- dnt/third_party/fast-reid/fastreid/engine/__init__.py +15 -0
- dnt/third_party/fast-reid/fastreid/engine/defaults.py +490 -0
- dnt/third_party/fast-reid/fastreid/engine/hooks.py +534 -0
- dnt/third_party/fast-reid/fastreid/engine/launch.py +103 -0
- dnt/third_party/fast-reid/fastreid/engine/train_loop.py +357 -0
- dnt/third_party/fast-reid/fastreid/evaluation/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/evaluation/clas_evaluator.py +81 -0
- dnt/third_party/fast-reid/fastreid/evaluation/evaluator.py +176 -0
- dnt/third_party/fast-reid/fastreid/evaluation/query_expansion.py +46 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank.py +200 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/__init__.py +20 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/setup.py +32 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rank_cylib/test_cython.py +106 -0
- dnt/third_party/fast-reid/fastreid/evaluation/reid_evaluation.py +143 -0
- dnt/third_party/fast-reid/fastreid/evaluation/rerank.py +73 -0
- dnt/third_party/fast-reid/fastreid/evaluation/roc.py +90 -0
- dnt/third_party/fast-reid/fastreid/evaluation/testing.py +88 -0
- dnt/third_party/fast-reid/fastreid/layers/__init__.py +19 -0
- dnt/third_party/fast-reid/fastreid/layers/activation.py +59 -0
- dnt/third_party/fast-reid/fastreid/layers/any_softmax.py +80 -0
- dnt/third_party/fast-reid/fastreid/layers/batch_norm.py +205 -0
- dnt/third_party/fast-reid/fastreid/layers/context_block.py +113 -0
- dnt/third_party/fast-reid/fastreid/layers/drop.py +161 -0
- dnt/third_party/fast-reid/fastreid/layers/frn.py +199 -0
- dnt/third_party/fast-reid/fastreid/layers/gather_layer.py +30 -0
- dnt/third_party/fast-reid/fastreid/layers/helpers.py +31 -0
- dnt/third_party/fast-reid/fastreid/layers/non_local.py +54 -0
- dnt/third_party/fast-reid/fastreid/layers/pooling.py +124 -0
- dnt/third_party/fast-reid/fastreid/layers/se_layer.py +25 -0
- dnt/third_party/fast-reid/fastreid/layers/splat.py +109 -0
- dnt/third_party/fast-reid/fastreid/layers/weight_init.py +122 -0
- dnt/third_party/fast-reid/fastreid/modeling/__init__.py +23 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/__init__.py +18 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/build.py +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/mobilenet.py +195 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/mobilenetv3.py +283 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/osnet.py +525 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/__init__.py +4 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/config.py +396 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B0_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B1_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B2_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B3_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B4_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet/EN-B5_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/effnet.py +281 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnet.py +596 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-1.6GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-12GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-16GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-200MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-3.2GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-32GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-4.0GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-400MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-6.4GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-600MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-8.0GF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnetx/RegNetX-800MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-1.6GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-12GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-16GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-200MF_dds_8gpu.yaml +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-3.2GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-32GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-4.0GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-400MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-6.4GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-600MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-8.0GF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/regnet/regnety/RegNetY-800MF_dds_8gpu.yaml +27 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/repvgg.py +309 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnest.py +365 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnet.py +364 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/resnext.py +335 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/shufflenet.py +203 -0
- dnt/third_party/fast-reid/fastreid/modeling/backbones/vision_transformer.py +399 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/__init__.py +11 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/build.py +25 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/clas_head.py +36 -0
- dnt/third_party/fast-reid/fastreid/modeling/heads/embedding_head.py +151 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/__init__.py +12 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/circle_loss.py +71 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/cross_entroy_loss.py +54 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/focal_loss.py +92 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/triplet_loss.py +113 -0
- dnt/third_party/fast-reid/fastreid/modeling/losses/utils.py +48 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/__init__.py +14 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/baseline.py +188 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/build.py +26 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/distiller.py +140 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/mgn.py +394 -0
- dnt/third_party/fast-reid/fastreid/modeling/meta_arch/moco.py +126 -0
- dnt/third_party/fast-reid/fastreid/solver/__init__.py +8 -0
- dnt/third_party/fast-reid/fastreid/solver/build.py +348 -0
- dnt/third_party/fast-reid/fastreid/solver/lr_scheduler.py +66 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/__init__.py +10 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/lamb.py +123 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/radam.py +149 -0
- dnt/third_party/fast-reid/fastreid/solver/optim/swa.py +246 -0
- dnt/third_party/fast-reid/fastreid/utils/__init__.py +6 -0
- dnt/third_party/fast-reid/fastreid/utils/checkpoint.py +503 -0
- dnt/third_party/fast-reid/fastreid/utils/collect_env.py +158 -0
- dnt/third_party/fast-reid/fastreid/utils/comm.py +255 -0
- dnt/third_party/fast-reid/fastreid/utils/compute_dist.py +200 -0
- dnt/third_party/fast-reid/fastreid/utils/env.py +119 -0
- dnt/third_party/fast-reid/fastreid/utils/events.py +461 -0
- dnt/third_party/fast-reid/fastreid/utils/faiss_utils.py +127 -0
- dnt/third_party/fast-reid/fastreid/utils/file_io.py +520 -0
- dnt/third_party/fast-reid/fastreid/utils/history_buffer.py +71 -0
- dnt/third_party/fast-reid/fastreid/utils/logger.py +211 -0
- dnt/third_party/fast-reid/fastreid/utils/params.py +103 -0
- dnt/third_party/fast-reid/fastreid/utils/precision_bn.py +94 -0
- dnt/third_party/fast-reid/fastreid/utils/registry.py +66 -0
- dnt/third_party/fast-reid/fastreid/utils/summary.py +120 -0
- dnt/third_party/fast-reid/fastreid/utils/timer.py +68 -0
- dnt/third_party/fast-reid/fastreid/utils/visualizer.py +278 -0
- dnt/track/__init__.py +2 -0
- dnt/track/botsort/__init__.py +4 -0
- dnt/track/botsort/bot_tracker/__init__.py +3 -0
- dnt/track/botsort/bot_tracker/basetrack.py +60 -0
- dnt/track/botsort/bot_tracker/bot_sort.py +473 -0
- dnt/track/botsort/bot_tracker/gmc.py +316 -0
- dnt/track/botsort/bot_tracker/kalman_filter.py +269 -0
- dnt/track/botsort/bot_tracker/matching.py +194 -0
- dnt/track/botsort/bot_tracker/mc_bot_sort.py +505 -0
- dnt/track/{dsort/utils → botsort/bot_tracker/tracking_utils}/evaluation.py +14 -4
- dnt/track/{dsort/utils → botsort/bot_tracker/tracking_utils}/io.py +19 -36
- dnt/track/botsort/bot_tracker/tracking_utils/timer.py +37 -0
- dnt/track/botsort/inference.py +96 -0
- dnt/track/config.py +120 -0
- dnt/track/dsort/configs/bagtricks_R50.yml +7 -0
- dnt/track/dsort/configs/deep_sort.yaml +0 -0
- dnt/track/dsort/configs/fastreid.yaml +1 -1
- dnt/track/dsort/deep_sort/deep/checkpoint/ckpt.t7 +0 -0
- dnt/track/dsort/deep_sort/deep/feature_extractor.py +87 -8
- dnt/track/dsort/deep_sort/deep_sort.py +31 -20
- dnt/track/dsort/deep_sort/sort/detection.py +2 -1
- dnt/track/dsort/deep_sort/sort/iou_matching.py +0 -2
- dnt/track/dsort/deep_sort/sort/linear_assignment.py +0 -3
- dnt/track/dsort/deep_sort/sort/nn_matching.py +5 -5
- dnt/track/dsort/deep_sort/sort/preprocessing.py +1 -2
- dnt/track/dsort/deep_sort/sort/track.py +2 -1
- dnt/track/dsort/deep_sort/sort/tracker.py +1 -1
- dnt/track/dsort/dsort.py +43 -33
- dnt/track/re_class.py +117 -0
- dnt/track/sort/sort.py +9 -6
- dnt/track/tracker.py +213 -32
- dnt-0.3.1.8.dist-info/METADATA +117 -0
- dnt-0.3.1.8.dist-info/RECORD +315 -0
- {dnt-0.2.4.dist-info → dnt-0.3.1.8.dist-info}/WHEEL +1 -1
- dnt/analysis/yield.py +0 -9
- dnt/track/dsort/deep_sort/deep/evaluate.py +0 -15
- dnt/track/dsort/deep_sort/deep/original_model.py +0 -106
- dnt/track/dsort/deep_sort/deep/test.py +0 -77
- dnt/track/dsort/deep_sort/deep/train.py +0 -189
- dnt/track/dsort/utils/asserts.py +0 -13
- dnt/track/dsort/utils/draw.py +0 -36
- dnt/track/dsort/utils/json_logger.py +0 -383
- dnt/track/dsort/utils/log.py +0 -17
- dnt/track/dsort/utils/parser.py +0 -35
- dnt/track/dsort/utils/tools.py +0 -39
- dnt-0.2.4.dist-info/METADATA +0 -35
- dnt-0.2.4.dist-info/RECORD +0 -64
- /dnt/{track/dsort/utils → third_party/fast-reid/checkpoint}/__init__.py +0 -0
- {dnt-0.2.4.dist-info → dnt-0.3.1.8.dist-info/licenses}/LICENSE +0 -0
- {dnt-0.2.4.dist-info → dnt-0.3.1.8.dist-info}/top_level.txt +0 -0
dnt/third_party/fast-reid/fastreid/modeling/heads/embedding_head.py (new file, +151 lines)

```python
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""

import torch
import torch.nn.functional as F
from torch import nn

from fastreid.config import configurable
from fastreid.layers import *
from fastreid.layers import pooling, any_softmax
from fastreid.layers.weight_init import weights_init_kaiming
from .build import REID_HEADS_REGISTRY


@REID_HEADS_REGISTRY.register()
class EmbeddingHead(nn.Module):
    """
    EmbeddingHead perform all feature aggregation in an embedding task, such as reid, image retrieval
    and face recognition

    It typically contains logic to

    1. feature aggregation via global average pooling and generalized mean pooling
    2. (optional) batchnorm, dimension reduction and etc.
    2. (in training only) margin-based softmax logits computation
    """

    @configurable
    def __init__(
            self,
            *,
            feat_dim,
            embedding_dim,
            num_classes,
            neck_feat,
            pool_type,
            cls_type,
            scale,
            margin,
            with_bnneck,
            norm_type
    ):
        """
        NOTE: this interface is experimental.

        Args:
            feat_dim:
            embedding_dim:
            num_classes:
            neck_feat:
            pool_type:
            cls_type:
            scale:
            margin:
            with_bnneck:
            norm_type:
        """
        super().__init__()

        # Pooling layer
        assert hasattr(pooling, pool_type), "Expected pool types are {}, " \
                                            "but got {}".format(pooling.__all__, pool_type)
        self.pool_layer = getattr(pooling, pool_type)()

        self.neck_feat = neck_feat

        neck = []
        if embedding_dim > 0:
            neck.append(nn.Conv2d(feat_dim, embedding_dim, 1, 1, bias=False))
            feat_dim = embedding_dim

        if with_bnneck:
            neck.append(get_norm(norm_type, feat_dim, bias_freeze=True))

        self.bottleneck = nn.Sequential(*neck)

        # Classification head
        assert hasattr(any_softmax, cls_type), "Expected cls types are {}, " \
                                               "but got {}".format(any_softmax.__all__, cls_type)
        self.weight = nn.Parameter(torch.Tensor(num_classes, feat_dim))
        self.cls_layer = getattr(any_softmax, cls_type)(num_classes, scale, margin)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        self.bottleneck.apply(weights_init_kaiming)
        nn.init.normal_(self.weight, std=0.01)

    @classmethod
    def from_config(cls, cfg):
        # fmt: off
        feat_dim = cfg.MODEL.BACKBONE.FEAT_DIM
        embedding_dim = cfg.MODEL.HEADS.EMBEDDING_DIM
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        neck_feat = cfg.MODEL.HEADS.NECK_FEAT
        pool_type = cfg.MODEL.HEADS.POOL_LAYER
        cls_type = cfg.MODEL.HEADS.CLS_LAYER
        scale = cfg.MODEL.HEADS.SCALE
        margin = cfg.MODEL.HEADS.MARGIN
        with_bnneck = cfg.MODEL.HEADS.WITH_BNNECK
        norm_type = cfg.MODEL.HEADS.NORM
        # fmt: on
        return {
            'feat_dim': feat_dim,
            'embedding_dim': embedding_dim,
            'num_classes': num_classes,
            'neck_feat': neck_feat,
            'pool_type': pool_type,
            'cls_type': cls_type,
            'scale': scale,
            'margin': margin,
            'with_bnneck': with_bnneck,
            'norm_type': norm_type
        }

    def forward(self, features, targets=None):
        """
        See :class:`ReIDHeads.forward`.
        """
        pool_feat = self.pool_layer(features)
        neck_feat = self.bottleneck(pool_feat)
        neck_feat = neck_feat[..., 0, 0]

        # Evaluation
        # fmt: off
        if not self.training: return neck_feat
        # fmt: on

        # Training
        if self.cls_layer.__class__.__name__ == 'Linear':
            logits = F.linear(neck_feat, self.weight)
        else:
            logits = F.linear(F.normalize(neck_feat), F.normalize(self.weight))

        # Pass logits.clone() into cls_layer, because there is in-place operations
        cls_outputs = self.cls_layer(logits.clone(), targets)

        # fmt: off
        if self.neck_feat == 'before':  feat = pool_feat[..., 0, 0]
        elif self.neck_feat == 'after': feat = neck_feat
        else: raise KeyError(f"{self.neck_feat} is invalid for MODEL.HEADS.NECK_FEAT")
        # fmt: on

        return {
            "cls_outputs": cls_outputs,
            "pred_class_logits": logits.mul(self.cls_layer.s),
            "features": feat,
        }
```
dnt/third_party/fast-reid/fastreid/modeling/losses/__init__.py (new file, +12 lines)

```python
# encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""

from .circle_loss import *
from .cross_entroy_loss import cross_entropy_loss, log_accuracy
from .focal_loss import focal_loss
from .triplet_loss import triplet_loss

__all__ = [k for k in globals().keys() if not k.startswith("_")]
```
dnt/third_party/fast-reid/fastreid/modeling/losses/circle_loss.py (new file, +71 lines)

```python
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""

import torch
import torch.nn.functional as F

__all__ = ["pairwise_circleloss", "pairwise_cosface"]


def pairwise_circleloss(
        embedding: torch.Tensor,
        targets: torch.Tensor,
        margin: float,
        gamma: float, ) -> torch.Tensor:
    embedding = F.normalize(embedding, dim=1)

    dist_mat = torch.matmul(embedding, embedding.t())

    N = dist_mat.size(0)

    is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
    is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()

    # Mask scores related to itself
    is_pos = is_pos - torch.eye(N, N, device=is_pos.device)

    s_p = dist_mat * is_pos
    s_n = dist_mat * is_neg

    alpha_p = torch.clamp_min(-s_p.detach() + 1 + margin, min=0.)
    alpha_n = torch.clamp_min(s_n.detach() + margin, min=0.)
    delta_p = 1 - margin
    delta_n = margin

    logit_p = - gamma * alpha_p * (s_p - delta_p) + (-99999999.) * (1 - is_pos)
    logit_n = gamma * alpha_n * (s_n - delta_n) + (-99999999.) * (1 - is_neg)

    loss = F.softplus(torch.logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()

    return loss


def pairwise_cosface(
        embedding: torch.Tensor,
        targets: torch.Tensor,
        margin: float,
        gamma: float, ) -> torch.Tensor:
    # Normalize embedding features
    embedding = F.normalize(embedding, dim=1)

    dist_mat = torch.matmul(embedding, embedding.t())

    N = dist_mat.size(0)
    is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
    is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()

    # Mask scores related to itself
    is_pos = is_pos - torch.eye(N, N, device=is_pos.device)

    s_p = dist_mat * is_pos
    s_n = dist_mat * is_neg

    logit_p = -gamma * s_p + (-99999999.) * (1 - is_pos)
    logit_n = gamma * (s_n + margin) + (-99999999.) * (1 - is_neg)

    loss = F.softplus(torch.logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()

    return loss
```
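For reference, a minimal usage sketch of the two pairwise losses above: both take raw embeddings plus integer identity labels and return a scalar loss. It assumes the vendored fastreid package is importable under the path shown in the file list; the `margin`/`gamma` values are illustrative, not defaults shipped with this package.

```python
import torch
from fastreid.modeling.losses.circle_loss import pairwise_circleloss, pairwise_cosface

embedding = torch.randn(8, 128)                   # 8 samples, 128-dim features
targets = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])  # identity labels

# Both functions L2-normalize internally and return a scalar loss tensor.
print(pairwise_circleloss(embedding, targets, margin=0.25, gamma=64.0))
print(pairwise_cosface(embedding, targets, margin=0.25, gamma=64.0))
```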
dnt/third_party/fast-reid/fastreid/modeling/losses/cross_entroy_loss.py (new file, +54 lines)

```python
# encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""
import torch
import torch.nn.functional as F

from fastreid.utils.events import get_event_storage


def log_accuracy(pred_class_logits, gt_classes, topk=(1,)):
    """
    Log the accuracy metrics to EventStorage.
    """
    bsz = pred_class_logits.size(0)
    maxk = max(topk)
    _, pred_class = pred_class_logits.topk(maxk, 1, True, True)
    pred_class = pred_class.t()
    correct = pred_class.eq(gt_classes.view(1, -1).expand_as(pred_class))

    ret = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True)
        ret.append(correct_k.mul_(1. / bsz))

    storage = get_event_storage()
    storage.put_scalar("cls_accuracy", ret[0])


def cross_entropy_loss(pred_class_outputs, gt_classes, eps, alpha=0.2):
    num_classes = pred_class_outputs.size(1)

    if eps >= 0:
        smooth_param = eps
    else:
        # Adaptive label smooth regularization
        soft_label = F.softmax(pred_class_outputs, dim=1)
        smooth_param = alpha * soft_label[torch.arange(soft_label.size(0)), gt_classes].unsqueeze(1)

    log_probs = F.log_softmax(pred_class_outputs, dim=1)
    with torch.no_grad():
        targets = torch.ones_like(log_probs)
        targets *= smooth_param / (num_classes - 1)
        targets.scatter_(1, gt_classes.data.unsqueeze(1), (1 - smooth_param))

    loss = (-targets * log_probs).sum(dim=1)

    with torch.no_grad():
        non_zero_cnt = max(loss.nonzero(as_tuple=False).size(0), 1)

    loss = loss.sum() / non_zero_cnt

    return loss
```
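For reference, a minimal sketch of calling `cross_entropy_loss` above in both its smoothing modes; a non-negative `eps` is used as a fixed label-smoothing value, while a negative `eps` selects the adaptive branch. The import path follows the file list and assumes the vendored fastreid package is importable; the values are illustrative.

```python
import torch
from fastreid.modeling.losses.cross_entroy_loss import cross_entropy_loss

logits = torch.randn(8, 10)          # batch of 8, 10 classes
gt = torch.randint(0, 10, (8,))      # ground-truth class ids

fixed = cross_entropy_loss(logits, gt, eps=0.1)      # fixed label smoothing
adaptive = cross_entropy_loss(logits, gt, eps=-1.0)  # adaptive smoothing (uses alpha, default 0.2)
```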
dnt/third_party/fast-reid/fastreid/modeling/losses/focal_loss.py (new file, +92 lines)

```python
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""

import torch
import torch.nn.functional as F


# based on:
# https://github.com/kornia/kornia/blob/master/kornia/losses/focal.py

def focal_loss(
        input: torch.Tensor,
        target: torch.Tensor,
        alpha: float,
        gamma: float = 2.0,
        reduction: str = 'mean') -> torch.Tensor:
    r"""Criterion that computes Focal loss.
    See :class:`fastreid.modeling.losses.FocalLoss` for details.
    According to [1], the Focal loss is computed as follows:
    .. math::
        \text{FL}(p_t) = -\alpha_t (1 - p_t)^{\gamma} \, \text{log}(p_t)
    where:
        - :math:`p_t` is the model's estimated probability for each class.
    Arguments:
        alpha (float): Weighting factor :math:`\alpha \in [0, 1]`.
        gamma (float): Focusing parameter :math:`\gamma >= 0`.
        reduction (str, optional): Specifies the reduction to apply to the
            output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied,
            ‘mean’: the sum of the output will be divided by the number of elements
            in the output, ‘sum’: the output will be summed. Default: ‘none’.
    Shape:
        - Input: :math:`(N, C, *)` where C = number of classes.
        - Target: :math:`(N, *)` where each value is
          :math:`0 ≤ targets[i] ≤ C−1`.
    Examples:
        >>> N = 5  # num_classes
        >>> loss = FocalLoss(cfg)
        >>> input = torch.randn(1, N, 3, 5, requires_grad=True)
        >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
        >>> output = loss(input, target)
        >>> output.backward()
    References:
        [1] https://arxiv.org/abs/1708.02002
    """
    if not torch.is_tensor(input):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(input)))

    if not len(input.shape) >= 2:
        raise ValueError("Invalid input shape, we expect BxCx*. Got: {}"
                         .format(input.shape))

    if input.size(0) != target.size(0):
        raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'
                         .format(input.size(0), target.size(0)))

    n = input.size(0)
    out_size = (n,) + input.size()[2:]
    if target.size()[1:] != input.size()[2:]:
        raise ValueError('Expected target size {}, got {}'.format(
            out_size, target.size()))

    if not input.device == target.device:
        raise ValueError(
            "input and target must be in the same device. Got: {}".format(
                input.device, target.device))

    # compute softmax over the classes axis
    input_soft = F.softmax(input, dim=1)

    # create the labels one hot tensor
    target_one_hot = F.one_hot(target, num_classes=input.shape[1])

    # compute the actual focal loss
    weight = torch.pow(-input_soft + 1., gamma)

    focal = -alpha * weight * torch.log(input_soft)
    loss_tmp = torch.sum(target_one_hot * focal, dim=1)

    if reduction == 'none':
        loss = loss_tmp
    elif reduction == 'mean':
        loss = torch.mean(loss_tmp)
    elif reduction == 'sum':
        loss = torch.sum(loss_tmp)
    else:
        raise NotImplementedError("Invalid reduction mode: {}"
                                  .format(reduction))
    return loss
```
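For reference, a minimal sketch of the functional `focal_loss` above on a plain (N, C) batch. The import path follows the file list and assumes the vendored fastreid package is importable; `alpha=0.25, gamma=2.0` are the values from the focal-loss paper, used here purely for illustration.

```python
import torch
from fastreid.modeling.losses.focal_loss import focal_loss

logits = torch.randn(4, 5, requires_grad=True)  # (N, C): 4 samples, 5 classes
labels = torch.randint(0, 5, (4,))              # (N,) integer class labels

loss = focal_loss(logits, labels, alpha=0.25, gamma=2.0, reduction='mean')
loss.backward()
```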
dnt/third_party/fast-reid/fastreid/modeling/losses/triplet_loss.py (new file, +113 lines)

```python
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""

import torch
import torch.nn.functional as F

from .utils import euclidean_dist, cosine_dist


def softmax_weights(dist, mask):
    max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
    diff = dist - max_v
    Z = torch.sum(torch.exp(diff) * mask, dim=1, keepdim=True) + 1e-6  # avoid division by zero
    W = torch.exp(diff) * mask / Z
    return W


def hard_example_mining(dist_mat, is_pos, is_neg):
    """For each anchor, find the hardest positive and negative sample.
    Args:
        dist_mat: pair wise distance between samples, shape [N, M]
        is_pos: positive index with shape [N, M]
        is_neg: negative index with shape [N, M]
    Returns:
        dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
        dist_an: pytorch Variable, distance(anchor, negative); shape [N]
        p_inds: pytorch LongTensor, with shape [N];
            indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
        n_inds: pytorch LongTensor, with shape [N];
            indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
    NOTE: Only consider the case in which all labels have same num of samples,
        thus we can cope with all anchors in parallel.
    """

    assert len(dist_mat.size()) == 2

    # `dist_ap` means distance(anchor, positive)
    # both `dist_ap` and `relative_p_inds` with shape [N]
    dist_ap, _ = torch.max(dist_mat * is_pos, dim=1)
    # `dist_an` means distance(anchor, negative)
    # both `dist_an` and `relative_n_inds` with shape [N]
    dist_an, _ = torch.min(dist_mat * is_neg + is_pos * 1e9, dim=1)

    return dist_ap, dist_an


def weighted_example_mining(dist_mat, is_pos, is_neg):
    """For each anchor, find the weighted positive and negative sample.
    Args:
        dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
        is_pos:
        is_neg:
    Returns:
        dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
        dist_an: pytorch Variable, distance(anchor, negative); shape [N]
    """
    assert len(dist_mat.size()) == 2

    is_pos = is_pos
    is_neg = is_neg
    dist_ap = dist_mat * is_pos
    dist_an = dist_mat * is_neg

    weights_ap = softmax_weights(dist_ap, is_pos)
    weights_an = softmax_weights(-dist_an, is_neg)

    dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
    dist_an = torch.sum(dist_an * weights_an, dim=1)

    return dist_ap, dist_an


def triplet_loss(embedding, targets, margin, norm_feat, hard_mining):
    r"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
    Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
    Loss for Person Re-Identification'."""

    if norm_feat:
        dist_mat = cosine_dist(embedding, embedding)
    else:
        dist_mat = euclidean_dist(embedding, embedding)

    # For distributed training, gather all features from different process.
    # if comm.get_world_size() > 1:
    #     all_embedding = torch.cat(GatherLayer.apply(embedding), dim=0)
    #     all_targets = concat_all_gather(targets)
    # else:
    #     all_embedding = embedding
    #     all_targets = targets

    N = dist_mat.size(0)
    is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
    is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()

    if hard_mining:
        dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
    else:
        dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)

    y = dist_an.new().resize_as_(dist_an).fill_(1)

    if margin > 0:
        loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=margin)
    else:
        loss = F.soft_margin_loss(dist_an - dist_ap, y)
        # fmt: off
        if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
        # fmt: on

    return loss
```
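For reference, a minimal sketch exercising `triplet_loss` above in its two mining modes: hard example mining with a fixed margin, and weighted example mining with a soft margin (selected by `margin <= 0`). Import path per the file list, assuming the vendored fastreid package is importable; the values are illustrative.

```python
import torch
from fastreid.modeling.losses.triplet_loss import triplet_loss

feats = torch.randn(16, 256)        # embeddings
ids = torch.randint(0, 4, (16,))    # identity labels

hard = triplet_loss(feats, ids, margin=0.3, norm_feat=False, hard_mining=True)
soft = triplet_loss(feats, ids, margin=0.0, norm_feat=True, hard_mining=False)
```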
dnt/third_party/fast-reid/fastreid/modeling/losses/utils.py (new file, +48 lines)

```python
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""

import torch
import torch.nn.functional as F


def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    tensors_gather = [torch.ones_like(tensor)
                      for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)

    output = torch.cat(tensors_gather, dim=0)
    return output


def normalize(x, axis=-1):
    """Normalizing to unit length along the specified dimension.
    Args:
        x: pytorch Variable
    Returns:
        x: pytorch Variable, same shape as input
    """
    x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
    return x


def euclidean_dist(x, y):
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy - 2 * torch.matmul(x, y.t())
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist


def cosine_dist(x, y):
    x = F.normalize(x, dim=1)
    y = F.normalize(y, dim=1)
    dist = 2 - 2 * torch.mm(x, y.t())
    return dist
```
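For reference, a small sanity-check sketch of the distance helpers above: `cosine_dist` returns 2 - 2*cos, which equals the squared `euclidean_dist` of L2-normalized inputs. It assumes the vendored fastreid package is importable under the path from the file list.

```python
import torch
import torch.nn.functional as F
from fastreid.modeling.losses.utils import euclidean_dist, cosine_dist

x, y = torch.randn(4, 64), torch.randn(6, 64)

d_cos = cosine_dist(x, y)                                                 # 2 - 2 * cosine similarity
d_euc_sq = euclidean_dist(F.normalize(x, dim=1), F.normalize(y, dim=1)) ** 2
print(torch.allclose(d_cos, d_euc_sq, atol=1e-5))                         # True: the two agree
```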
dnt/third_party/fast-reid/fastreid/modeling/meta_arch/__init__.py (new file, +14 lines)

```python
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""

from .build import META_ARCH_REGISTRY, build_model


# import all the meta_arch, so they will be registered
from .baseline import Baseline
from .mgn import MGN
from .moco import MoCo
from .distiller import Distiller
```