brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
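
Most of the new model directories listed above share the same Brain-Score plugin layout: an __init__.py that registers the model, a model.py that builds it and names the layers to expose, a requirements.txt, a test.py, and (for many of them) a region_layer_map/*.json holding a per-region layer assignment. As orientation only, below is a minimal sketch of the registration such an __init__.py typically performs; the identifier and helper calls are hypothetical and not taken from any file in this diff.

    # Hypothetical plugin __init__.py sketch: the identifier is made up; only the
    # registration pattern reflects the brainscore_vision plugin convention.
    from brainscore_vision import model_registry
    from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
    from .model import get_model, get_layers

    # ModelCommitment wraps the activations model together with the layers that
    # Brain-Score may map onto cortical regions (V1, V2, V4, IT) and behavior.
    model_registry['my_model'] = lambda: ModelCommitment(
        identifier='my_model',
        activations_model=get_model('my_model'),
        layers=get_layers('my_model'),
    )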
@@ -0,0 +1,1061 @@
|
|
1
|
+
from functools import partial
|
2
|
+
from typing import Any, Callable, List, Optional, Type, Union
|
3
|
+
|
4
|
+
import torch
|
5
|
+
import torch.nn as nn
|
6
|
+
from torch import Tensor
|
7
|
+
import torch.nn.functional as F
|
8
|
+
from torchvision.transforms._presets import ImageClassification
|
9
|
+
from torchvision.utils import _log_api_usage_once
|
10
|
+
from torchvision.models._api import Weights, WeightsEnum
|
11
|
+
from torchvision.models._meta import _IMAGENET_CATEGORIES
|
12
|
+
from torchvision.models._utils import _ovewrite_named_param, handle_legacy_interface
|
13
|
+
from .spatialattn import SpatialAttn
|
14
|
+
from timm.models.layers import Conv2dSame
|
15
|
+
|
16
|
+
__all__ = [
|
17
|
+
"ResNet",
|
18
|
+
"ResNet18_Weights",
|
19
|
+
"ResNet34_Weights",
|
20
|
+
"ResNet50_Weights",
|
21
|
+
"ResNet101_Weights",
|
22
|
+
"ResNet152_Weights",
|
23
|
+
"ResNeXt50_32X4D_Weights",
|
24
|
+
"ResNeXt101_32X8D_Weights",
|
25
|
+
"ResNeXt101_64X4D_Weights",
|
26
|
+
"Wide_ResNet50_2_Weights",
|
27
|
+
"Wide_ResNet101_2_Weights",
|
28
|
+
"resnet18",
|
29
|
+
"resnet34",
|
30
|
+
"resnet50",
|
31
|
+
"resnet101",
|
32
|
+
"resnet152",
|
33
|
+
"resnext50_32x4d",
|
34
|
+
"resnext101_32x8d",
|
35
|
+
"resnext101_64x4d",
|
36
|
+
"wide_resnet50_2",
|
37
|
+
"wide_resnet101_2",
|
38
|
+
]
|
39
|
+
|
40
|
+
class BlurPool2d(nn.Module):
|
41
|
+
def __init__(self, kernel_size, stride, blur_kernel_learnable=False):
|
42
|
+
super(BlurPool2d, self).__init__()
|
43
|
+
self.blur_kernel = nn.Parameter(self._get_blur_kernel(kernel_size))
|
44
|
+
self.stride = stride
|
45
|
+
self.padding = (kernel_size - 1) // 2
|
46
|
+
self.kernel_size = kernel_size
|
47
|
+
if not blur_kernel_learnable:
|
48
|
+
self.blur_kernel.requires_grad_(False)
|
49
|
+
|
50
|
+
def forward(self, x):
|
51
|
+
B, C, H, W = x.shape
|
52
|
+
x = x.view(-1, H, W).unsqueeze(1)
|
53
|
+
x = F.conv2d(x, self.blur_kernel, stride=self.stride, padding=self.padding)
|
54
|
+
H, W = x.shape[2:]
|
55
|
+
return x.view(B, C, H, W)
|
56
|
+
|
57
|
+
def _get_blur_kernel(self, kernel_size):
|
58
|
+
blur_kernel_dict = {
|
59
|
+
2: [1, 1],
|
60
|
+
3: [1, 2, 1],
|
61
|
+
4: [1, 3, 3, 1],
|
62
|
+
5: [1, 4, 6, 4, 1],
|
63
|
+
6: [1, 5, 10, 10, 5, 1],
|
64
|
+
7: [1, 6, 15, 20, 15, 6, 1]
|
65
|
+
}
|
66
|
+
if kernel_size in blur_kernel_dict.keys():
|
67
|
+
blur_kernel_1d = torch.FloatTensor(blur_kernel_dict[kernel_size]).view(-1, 1)
|
68
|
+
blur_kernel = torch.matmul(blur_kernel_1d, blur_kernel_1d.t())
|
69
|
+
blur_kernel.div_(blur_kernel.sum())
|
70
|
+
return blur_kernel.unsqueeze(0).unsqueeze(1)
|
71
|
+
else:
|
72
|
+
raise ValueError("invalid blur kernel size: {}".format(kernel_size))
|
73
|
+
|
74
|
+
def __repr__(self):
|
75
|
+
return 'BlurPool2d(kernel_size=({}, {}), stride=({}, {}), padding=({}, {}))'.format(
|
76
|
+
self.kernel_size, self.kernel_size, self.stride,
|
77
|
+
self.stride, self.padding, self.padding
|
78
|
+
)
|
79
|
+
|
80
|
+
|
81
|
+
class MaxBlurPool2d(nn.Module):
|
82
|
+
def __init__(self, kernel_size=2, stride=2, padding=0, blur_kernel_size=3, blur_kernel_learnable=False, blur_position='after'):
|
83
|
+
super(MaxBlurPool2d, self).__init__()
|
84
|
+
self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=padding)
|
85
|
+
self.blurpool = BlurPool2d(kernel_size=blur_kernel_size, stride=stride, blur_kernel_learnable=blur_kernel_learnable)
|
86
|
+
|
87
|
+
if blur_position == 'after':
|
88
|
+
self.layer = [self.maxpool, self.blurpool]
|
89
|
+
elif blur_position == 'before':
|
90
|
+
self.layer = [self.blurpool, self.maxpool]
|
91
|
+
else:
|
92
|
+
raise ValueError('invalid blur postion: {}'.format(blur_position))
|
93
|
+
|
94
|
+
self.main = nn.Sequential(self.maxpool, self.blurpool)
|
95
|
+
|
96
|
+
def forward(self, x):
|
97
|
+
return self.main(x)
|
98
|
+
|
99
|
+
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> Conv2dSame:
|
100
|
+
"""3x3 convolution with padding"""
|
101
|
+
return Conv2dSame(
|
102
|
+
in_planes,
|
103
|
+
out_planes,
|
104
|
+
kernel_size=3,
|
105
|
+
stride=stride,
|
106
|
+
padding=dilation,
|
107
|
+
groups=groups,
|
108
|
+
bias=False,
|
109
|
+
dilation=dilation,
|
110
|
+
)
|
111
|
+
|
112
|
+
|
113
|
+
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> Conv2dSame:
|
114
|
+
"""1x1 convolution"""
|
115
|
+
return Conv2dSame(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
|
116
|
+
|
117
|
+
|
118
|
+
class BasicBlock(nn.Module):
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


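# ResNet below follows torchvision.models.resnet.ResNet, with the main local changes being
# that the stem and residual convolutions use Conv2dSame and that an optional attn_pool flag
# swaps the global average pool for a SpatialAttn head (defined elsewhere in this package).
# Construction sketch (shapes are illustrative, not part of the original file):
#
#     model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)   # ResNet-50 topology
#     logits = model(torch.randn(2, 3, 224, 224))                  # -> (2, 1000)
#
# Note that _forward_impl flattens with x.squeeze(), which also drops a batch dimension of
# size 1, so single-image batches come out as 1-D logits.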
class ResNet(nn.Module):
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        attn_pool: bool = False,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = Conv2dSame(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.attnpool = attn_pool
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        if attn_pool:
            self.avgpool = SpatialAttn(2048, [4096, 2048], 1)
            self.avgpool2 = nn.AdaptiveAvgPool2d((1, 1))
        else:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, Conv2dSame):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck) and m.bn3.weight is not None:
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

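    # _make_layer builds one ResNet stage: the first block applies the stride (and a 1x1
    # downsample on the identity path when shapes change); the remaining blocks keep resolution.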
    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )

        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # if self.attnpool:
        #     x2 = self.avgpool(x)
        # x = self.avgpool2(x)
        x = self.avgpool(x)
        x = x.squeeze()  # squeeze() also drops a batch dimension of size 1
        x = self.fc(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


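# _resnet is the shared builder: it optionally overrides num_classes from the weights
# metadata, instantiates ResNet, and loads the pretrained state dict when weights are given.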
def _resnet(
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ResNet(block, layers, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}


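# The WeightsEnum classes below mirror torchvision's resnet.py: each entry bundles a checkpoint
# URL, its inference transforms, and metadata (parameter count, ImageNet accuracy, ops, size),
# with DEFAULT pointing at the recommended entry. Illustrative access pattern, assuming the
# torchvision >= 0.13 weights API imported at the top of this module:
#
#     w = ResNet50_Weights.DEFAULT
#     preprocess = w.transforms()                           # resize/crop/normalize pipeline
#     top1 = w.meta["_metrics"]["ImageNet-1K"]["acc@1"]
#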
class ResNet18_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet18-f37072fd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.758,
                    "acc@5": 89.078,
                }
            },
            "_ops": 1.814,
            "_weight_size": 44.661,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class ResNet34_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet34-b627a593.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 21797672,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 73.314,
                    "acc@5": 91.420,
                }
            },
            "_ops": 3.664,
            "_weight_size": 83.275,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class ResNet50_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet50-0676ba61.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.130,
                    "acc@5": 92.862,
                }
            },
            "_ops": 4.089,
            "_weight_size": 97.781,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet50-11ad3fa6.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "recipe": "https://github.com/pytorch/vision/issues/3995#issuecomment-1013906621",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.858,
                    "acc@5": 95.434,
                }
            },
            "_ops": 4.089,
            "_weight_size": 97.79,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class ResNet101_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet101-63fe2227.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 44549160,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.374,
                    "acc@5": 93.546,
                }
            },
            "_ops": 7.801,
            "_weight_size": 170.511,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet101-cd907fc2.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 44549160,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.886,
                    "acc@5": 95.780,
                }
            },
            "_ops": 7.801,
            "_weight_size": 170.53,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class ResNet152_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet152-394f9c45.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 60192808,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.312,
                    "acc@5": 94.046,
                }
            },
            "_ops": 11.514,
            "_weight_size": 230.434,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet152-f82ba261.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 60192808,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.284,
                    "acc@5": 96.002,
                }
            },
            "_ops": 11.514,
            "_weight_size": 230.474,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class ResNeXt50_32X4D_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25028904,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.618,
                    "acc@5": 93.698,
                }
            },
            "_ops": 4.23,
            "_weight_size": 95.789,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25028904,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.198,
                    "acc@5": 95.340,
                }
            },
            "_ops": 4.23,
            "_weight_size": 95.833,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class ResNeXt101_32X8D_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.312,
                    "acc@5": 94.526,
                }
            },
            "_ops": 16.414,
            "_weight_size": 339.586,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.834,
                    "acc@5": 96.228,
                }
            },
            "_ops": 16.414,
            "_weight_size": 339.673,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class ResNeXt101_64X4D_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.246,
                    "acc@5": 96.454,
                }
            },
            "_ops": 15.46,
            "_weight_size": 319.318,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class Wide_ResNet50_2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 68883240,
            "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.468,
                    "acc@5": 94.086,
                }
            },
            "_ops": 11.398,
            "_weight_size": 131.82,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 68883240,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.602,
                    "acc@5": 95.758,
                }
            },
            "_ops": 11.398,
            "_weight_size": 263.124,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class Wide_ResNet101_2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 126886696,
            "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.848,
                    "acc@5": 94.284,
                }
            },
            "_ops": 22.753,
            "_weight_size": 242.896,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 126886696,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.510,
                    "acc@5": 96.020,
                }
            },
            "_ops": 22.753,
            "_weight_size": 484.747,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


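# The factory functions below keep torchvision's public signatures (the legacy "pretrained"
# flag is remapped to "weights" by handle_legacy_interface) but build the Conv2dSame-based
# ResNet defined above. Usage sketch, assuming the torchvision checkpoints remain loadable
# because Conv2dSame keeps nn.Conv2d's parameter layout:
#
#     model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
#     model.eval()
#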
@handle_legacy_interface(weights=("pretrained", ResNet18_Weights.IMAGENET1K_V1))
def resnet18(*, weights: Optional[ResNet18_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-18 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.

    Args:
        weights (:class:`~torchvision.models.ResNet18_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNet18_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNet18_Weights
        :members:
    """
    weights = ResNet18_Weights.verify(weights)

    return _resnet(BasicBlock, [2, 2, 2, 2], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNet34_Weights.IMAGENET1K_V1))
def resnet34(*, weights: Optional[ResNet34_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-34 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.

    Args:
        weights (:class:`~torchvision.models.ResNet34_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNet34_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNet34_Weights
        :members:
    """
    weights = ResNet34_Weights.verify(weights)

    return _resnet(BasicBlock, [3, 4, 6, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNet50_Weights.IMAGENET1K_V1))
def resnet50(*, weights: Optional[ResNet50_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.

    .. note::
       The bottleneck of TorchVision places the stride for downsampling to the second 3x3
       convolution while the original paper places it to the first 1x1 convolution.
       This variant improves the accuracy and is known as `ResNet V1.5
       <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNet50_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
    """
    weights = ResNet50_Weights.verify(weights)

    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNet101_Weights.IMAGENET1K_V1))
def resnet101(*, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.

    .. note::
       The bottleneck of TorchVision places the stride for downsampling to the second 3x3
       convolution while the original paper places it to the first 1x1 convolution.
       This variant improves the accuracy and is known as `ResNet V1.5
       <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet101_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNet101_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNet101_Weights
        :members:
    """
    weights = ResNet101_Weights.verify(weights)

    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNet152_Weights.IMAGENET1K_V1))
def resnet152(*, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.

    .. note::
       The bottleneck of TorchVision places the stride for downsampling to the second 3x3
       convolution while the original paper places it to the first 1x1 convolution.
       This variant improves the accuracy and is known as `ResNet V1.5
       <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet152_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNet152_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNet152_Weights
        :members:
    """
    weights = ResNet152_Weights.verify(weights)

    return _resnet(Bottleneck, [3, 8, 36, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNeXt50_32X4D_Weights.IMAGENET1K_V1))
def resnext50_32x4d(
    *, weights: Optional[ResNeXt50_32X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """ResNeXt-50 32x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.

    Args:
        weights (:class:`~torchvision.models.ResNeXt50_32X4D_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt50_32X4D_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.ResNeXt50_32X4D_Weights
        :members:
    """
    weights = ResNeXt50_32X4D_Weights.verify(weights)

    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNeXt101_32X8D_Weights.IMAGENET1K_V1))
def resnext101_32x8d(
    *, weights: Optional[ResNeXt101_32X8D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """ResNeXt-101 32x8d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.

    Args:
        weights (:class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt101_32X8D_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
        :members:
    """
    weights = ResNeXt101_32X8D_Weights.verify(weights)

    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 8)
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", ResNeXt101_64X4D_Weights.IMAGENET1K_V1))
def resnext101_64x4d(
    *, weights: Optional[ResNeXt101_64X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """ResNeXt-101 64x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.

    Args:
        weights (:class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt101_64X4D_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
        :members:
    """
    weights = ResNeXt101_64X4D_Weights.verify(weights)

    _ovewrite_named_param(kwargs, "groups", 64)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", Wide_ResNet50_2_Weights.IMAGENET1K_V1))
def wide_resnet50_2(
    *, weights: Optional[Wide_ResNet50_2_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """Wide ResNet-50-2 model from
    `Wide Residual Networks <https://arxiv.org/abs/1605.07146>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        weights (:class:`~torchvision.models.Wide_ResNet50_2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.Wide_ResNet50_2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.Wide_ResNet50_2_Weights
        :members:
    """
    weights = Wide_ResNet50_2_Weights.verify(weights)

    _ovewrite_named_param(kwargs, "width_per_group", 64 * 2)
    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", Wide_ResNet101_2_Weights.IMAGENET1K_V1))
def wide_resnet101_2(
    *, weights: Optional[Wide_ResNet101_2_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """Wide ResNet-101-2 model from
    `Wide Residual Networks <https://arxiv.org/abs/1605.07146>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
    channels, and in Wide ResNet-101-2 has 2048-1024-2048.

    Args:
        weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.Wide_ResNet101_2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.Wide_ResNet101_2_Weights
        :members:
    """
    weights = Wide_ResNet101_2_Weights.verify(weights)

    _ovewrite_named_param(kwargs, "width_per_group", 64 * 2)
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)


# The dictionary below is internal implementation detail and will be removed in v0.15
from torchvision.models._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "resnet18": ResNet18_Weights.IMAGENET1K_V1.url,
        "resnet34": ResNet34_Weights.IMAGENET1K_V1.url,
        "resnet50": ResNet50_Weights.IMAGENET1K_V1.url,
        "resnet101": ResNet101_Weights.IMAGENET1K_V1.url,
        "resnet152": ResNet152_Weights.IMAGENET1K_V1.url,
        "resnext50_32x4d": ResNeXt50_32X4D_Weights.IMAGENET1K_V1.url,
        "resnext101_32x8d": ResNeXt101_32X8D_Weights.IMAGENET1K_V1.url,
        "wide_resnet50_2": Wide_ResNet50_2_Weights.IMAGENET1K_V1.url,
        "wide_resnet101_2": Wide_ResNet101_2_Weights.IMAGENET1K_V1.url,
    }
)
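

# Quick smoke-test sketch (illustrative; assumes torch is imported at the top of this module
# and uses randomly initialized weights with the default 1000-way head):
#
#     if __name__ == "__main__":
#         net = resnet50()
#         out = net(torch.randn(2, 3, 224, 224))
#         print(out.shape)  # expected: torch.Size([2, 1000])
#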