brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
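The hunk below shows the beginning of one of the newly added 496-line ResNet helper modules; both brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py and brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py ship with +496 lines in this release, so it plausibly belongs to either. It is truncated partway through the Bottleneck class.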
@@ -0,0 +1,496 @@
|
|
1
|
+
import torch
|
2
|
+
import os
|
3
|
+
from functools import partial
|
4
|
+
from typing import Any, Callable, List, Optional, Type, Union
|
5
|
+
import torch.nn as nn
|
6
|
+
from torch import Tensor
|
7
|
+
import torch.nn.functional as F
|
8
|
+
from torchvision.transforms._presets import ImageClassification
|
9
|
+
from torchvision.utils import _log_api_usage_once
|
10
|
+
from torchvision.models._api import Weights, WeightsEnum
|
11
|
+
from torchvision.models._meta import _IMAGENET_CATEGORIES
|
12
|
+
from torchvision.models._utils import _ovewrite_named_param, handle_legacy_interface
|
13
|
+
from timm.models.layers import Conv2dSame
|
14
|
+
|
15
|
+
|
16
|
+
|
17
|
+
dir_path = os.path.dirname(os.path.realpath(__file__))
|
18
|
+
|
19
|
+
__all__ = [
|
20
|
+
"resnet50"
|
21
|
+
]
|
22
|
+
|
23
|
+
|
24
|
+
class DSConv2d(nn.Module):
|
25
|
+
def __init__(self, nin, kernels_per_layer, nout):
|
26
|
+
super(DSConv2d, self).__init__()
|
27
|
+
self.depthwise = nn.Conv2d(nin, nin * kernels_per_layer, kernel_size=3, padding=1, groups=nin,
|
28
|
+
padding_mode='reflect')
|
29
|
+
self.pointwise = nn.Conv2d(nin * kernels_per_layer, nout, kernel_size=1)
|
30
|
+
|
31
|
+
def forward(self, x):
|
32
|
+
out = self.depthwise(x)
|
33
|
+
out = self.pointwise(out)
|
34
|
+
return out
|
35
|
+
|
36
|
+
|
37
|
+
class SpatialAttn(nn.Module):
|
38
|
+
def __init__(self, inp_dim, hidden_sizes, num_heads, atttype='channel'):
|
39
|
+
super(SpatialAttn, self).__init__()
|
40
|
+
self.inp_dim = inp_dim
|
41
|
+
self.hidden_sizes = hidden_sizes
|
42
|
+
self.num_heads = num_heads
|
43
|
+
|
44
|
+
self.module_list = nn.ModuleList([])
|
45
|
+
if atttype == 'channel':
|
46
|
+
first_pool = nn.Conv2d(self.inp_dim, hidden_sizes[0], kernel_size=1, bias=True)
|
47
|
+
elif atttype == 'dw':
|
48
|
+
first_pool = DSConv2d(self.inp_dim, 1, hidden_sizes[0])
|
49
|
+
for i in range(len(hidden_sizes) + 1):
|
50
|
+
if i == 0:
|
51
|
+
self.module_list.append(first_pool)
|
52
|
+
self.module_list.append(nn.ReLU(inplace=True))
|
53
|
+
elif i < len(hidden_sizes):
|
54
|
+
self.module_list.append(nn.Conv2d(hidden_sizes[i - 1], hidden_sizes[i], kernel_size=1, bias=True))
|
55
|
+
self.module_list.append(nn.ReLU(inplace=True))
|
56
|
+
else:
|
57
|
+
self.module_list.append(nn.Conv2d(hidden_sizes[i - 1], num_heads, kernel_size=1, bias=False))
|
58
|
+
|
59
|
+
def forward(self, x):
|
60
|
+
inp = x
|
61
|
+
for m in self.module_list:
|
62
|
+
x = m(x)
|
63
|
+
assert self.inp_dim % x.shape[1] == 0, "Output channels must be divisible by input number of channels"
|
64
|
+
x = nn.Softmax(dim=-1)(x.view(x.shape[0], x.shape[1], -1)).view(x.shape)
|
65
|
+
if x.shape[1] != self.inp_dim:
|
66
|
+
x = x.repeat(1, self.inp_dim // x.shape[1], 1, 1)
|
67
|
+
w = x
|
68
|
+
x = w * inp
|
69
|
+
x = x.sum((-1, -2))
|
70
|
+
return x
|
71
|
+
|
72
|
+
|
73
|
+
class BlurPool2d(nn.Module):
|
74
|
+
def __init__(self, kernel_size, stride, blur_kernel_learnable=False):
|
75
|
+
super(BlurPool2d, self).__init__()
|
76
|
+
self.blur_kernel = nn.Parameter(self._get_blur_kernel(kernel_size))
|
77
|
+
self.stride = stride
|
78
|
+
self.padding = (kernel_size - 1) // 2
|
79
|
+
self.kernel_size = kernel_size
|
80
|
+
if not blur_kernel_learnable:
|
81
|
+
self.blur_kernel.requires_grad_(False)
|
82
|
+
|
83
|
+
def forward(self, x):
|
84
|
+
B, C, H, W = x.shape
|
85
|
+
x = x.view(-1, H, W).unsqueeze(1)
|
86
|
+
x = F.conv2d(x, self.blur_kernel, stride=self.stride, padding=self.padding)
|
87
|
+
H, W = x.shape[2:]
|
88
|
+
return x.view(B, C, H, W)
|
89
|
+
|
90
|
+
def _get_blur_kernel(self, kernel_size):
|
91
|
+
blur_kernel_dict = {
|
92
|
+
2: [1, 1],
|
93
|
+
3: [1, 2, 1],
|
94
|
+
4: [1, 3, 3, 1],
|
95
|
+
5: [1, 4, 6, 4, 1],
|
96
|
+
6: [1, 5, 10, 10, 5, 1],
|
97
|
+
7: [1, 6, 15, 20, 15, 6, 1]
|
98
|
+
}
|
99
|
+
if kernel_size in blur_kernel_dict.keys():
|
100
|
+
blur_kernel_1d = torch.FloatTensor(blur_kernel_dict[kernel_size]).view(-1, 1)
|
101
|
+
blur_kernel = torch.matmul(blur_kernel_1d, blur_kernel_1d.t())
|
102
|
+
blur_kernel.div_(blur_kernel.sum())
|
103
|
+
return blur_kernel.unsqueeze(0).unsqueeze(1)
|
104
|
+
else:
|
105
|
+
raise ValueError("invalid blur kernel size: {}".format(kernel_size))
|
106
|
+
|
107
|
+
def __repr__(self):
|
108
|
+
return 'BlurPool2d(kernel_size=({}, {}), stride=({}, {}), padding=({}, {}))'.format(
|
109
|
+
self.kernel_size, self.kernel_size, self.stride,
|
110
|
+
self.stride, self.padding, self.padding
|
111
|
+
)
|
112
|
+
|
113
|
+
|
114
|
+
class MaxBlurPool2d(nn.Module):
|
115
|
+
def __init__(self, kernel_size=2, stride=2, padding=0, blur_kernel_size=3, blur_kernel_learnable=False,
|
116
|
+
blur_position='after'):
|
117
|
+
super(MaxBlurPool2d, self).__init__()
|
118
|
+
self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=padding)
|
119
|
+
self.blurpool = BlurPool2d(kernel_size=blur_kernel_size, stride=stride,
|
120
|
+
blur_kernel_learnable=blur_kernel_learnable)
|
121
|
+
|
122
|
+
if blur_position == 'after':
|
123
|
+
self.layer = [self.maxpool, self.blurpool]
|
124
|
+
elif blur_position == 'before':
|
125
|
+
self.layer = [self.blurpool, self.maxpool]
|
126
|
+
else:
|
127
|
+
raise ValueError('invalid blur postion: {}'.format(blur_position))
|
128
|
+
|
129
|
+
self.main = nn.Sequential(self.maxpool, self.blurpool)
|
130
|
+
|
131
|
+
def forward(self, x):
|
132
|
+
return self.main(x)
|
133
|
+
|
134
|
+
|
135
|
+
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> Conv2dSame:
    """3x3 convolution with padding"""
    return Conv2dSame(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )


def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> Conv2dSame:
    """1x1 convolution"""
    return Conv2dSame(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)

class BasicBlock(nn.Module):
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out

class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out

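# Illustrative sketch, not part of the changeset. Both residual block types preserve
# spatial resolution at stride=1 and expand channels by `expansion`:
#
# _block = Bottleneck(inplanes=256, planes=64)   # expansion=4 -> 256 output channels
# _y = _block(torch.randn(1, 256, 56, 56))       # -> torch.Size([1, 256, 56, 56])
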
class ResNet(nn.Module):
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        attn_pool: bool = False,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = Conv2dSame(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.attnpool = attn_pool
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        if attn_pool:
            # note: the hard-coded 2048 channels assume a Bottleneck backbone (e.g. ResNet-50)
            self.avgpool = SpatialAttn(2048, [4096, 2048], 1)
            self.avgpool2 = nn.AdaptiveAvgPool2d((1, 1))
        else:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, Conv2dSame):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck) and m.bn3.weight is not None:
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )

        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.attnpool:
            x2 = self.avgpool2(x)  # side branch; computed but currently unused
        x = self.avgpool(x)
        # flatten is robust to batch size 1, unlike the previous x.squeeze(),
        # and is a no-op for the already-flat (B, C) attention-pool output
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)

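# Illustrative sketch, not part of the changeset. The attn_pool flag swaps global
# average pooling for the SpatialAttn head defined above (whose hard-coded 2048
# channels assume a Bottleneck backbone):
#
# _net = ResNet(Bottleneck, [3, 4, 6, 3], attn_pool=True)   # ResNet-50 layout
# _logits = _net(torch.randn(2, 3, 224, 224))               # -> torch.Size([2, 1000])
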
def _resnet(
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ResNet(block, layers, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model

_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}


class ResNet50_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet50-0676ba61.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.130,
                    "acc@5": 92.862,
                }
            },
            "_ops": 4.089,
            "_weight_size": 97.781,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet50-11ad3fa6.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "recipe": "https://github.com/pytorch/vision/issues/3995#issuecomment-1013906621",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.858,
                    "acc@5": 95.434,
                }
            },
            "_ops": 4.089,
            "_weight_size": 97.79,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2

@handle_legacy_interface(weights=("pretrained", ResNet50_Weights.IMAGENET1K_V1))
def resnet50(*, weights: Optional[ResNet50_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.

    .. note::
        The bottleneck of TorchVision places the stride for downsampling to the second 3x3
        convolution while the original paper places it to the first 1x1 convolution.
        This variant improves the accuracy and is known as `ResNet V1.5
        <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNet50_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
    """
    weights = ResNet50_Weights.verify(weights)

    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)

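# Illustrative sketch, not part of the changeset. The legacy-interface decorator maps
# the old pretrained=True argument onto the V1 weights; the enum can also be passed
# directly, and extra kwargs (such as attn_pool) are forwarded to the ResNet constructor:
#
# _model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
# _model.eval()
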
# The dictionary below is an internal implementation detail and will be removed in v0.15
from torchvision.models._utils import _ModelURLs

model_urls = _ModelURLs(
    {
        "resnet50": ResNet50_Weights.IMAGENET1K_V1.url,
    }
)
@@ -0,0 +1,45 @@
from brainscore_vision.model_helpers.check_submission import check_models
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
from brainscore_vision.model_helpers.s3 import load_weight_file
import functools
import torch
from .helpers.helpers import resnet50


"""
Template module for a base model submission to brain-score
"""
model = resnet50(attn_pool=False)
weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                relative_path="resnet50-moclr8deg/resnet50_moclr_linear_sdnoattn.pth",
                                version_id="null",
                                sha1="e09f117c8e10c0a38ac914aff5b335327fcd4dbf")
r50_vito_sd = torch.load(weights_path, map_location=torch.device('cpu'))
model.load_state_dict(r50_vito_sd, strict=False)
model.eval()
preprocessing = functools.partial(load_preprocess_images, image_size=224)
wrapper = PytorchWrapper(identifier='resnet50-moclr8deg', model=model, preprocessing=preprocessing)


def get_model(name):
    assert name == 'resnet50-moclr8deg'
    return wrapper


def get_layers(name):
    assert name == 'resnet50-moclr8deg'
    layers = ['conv1', 'layer1.0', 'layer1.1', 'layer1.2', 'layer2.0', 'layer2.1', 'layer2.2', 'layer2.3',
              'layer3.0', 'layer3.1', 'layer3.2', 'layer3.3', 'layer3.4', 'layer3.5', 'layer4.0', 'layer4.1',
              'layer4.2', 'avgpool', 'fc']
    return layers


def get_bibtex(model_identifier):
    return ''


if __name__ == '__main__':
    # Use this method to ensure the correctness of the BaseModel implementations.
    # It executes a mock run of brain-score benchmarks.
    check_models.check_base_models(__name__)
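# Illustrative note, not part of the changeset. strict=False above skips any keys
# that don't line up between checkpoint and model instead of raising; a variant
# that surfaces what was skipped:
#
# _missing, _unexpected = model.load_state_dict(r50_vito_sd, strict=False)
# print(_missing, _unexpected)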
@@ -0,0 +1,9 @@
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from brainscore_vision import model_registry
from .model import get_layers, get_model


model_registry['resnet50_robust_l2_eps1'] = lambda: ModelCommitment(identifier='resnet50_robust_l2_eps1',
                                                                    activations_model=get_model('resnet50_robust_l2_eps1'),
                                                                    layers=get_layers('resnet50_robust_l2_eps1'))
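# Illustrative note, not part of the changeset. Registering a lazy lambda defers
# weight download and model construction until the model is actually requested, e.g.:
#
# from brainscore_vision import load_model
# _model = load_model('resnet50_robust_l2_eps1')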
@@ -0,0 +1,72 @@
from brainscore_vision.model_helpers.check_submission import check_models
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
import torchvision
import functools
import torch
import os
from torchvision.models import resnet50
from brainscore_vision.model_helpers.s3 import load_weight_file
dir_path = os.path.dirname(os.path.realpath(__file__))

"""
Template module for a base model submission to brain-score
"""


def get_model(name):
    """
    This method fetches an instance of a base model. The instance has to be callable and return an xarray object
    containing activations. There are standard wrapper implementations for common libraries, like pytorch and
    keras. Check out the examples folder to see more. For custom implementations, check out the implementations
    of the wrappers.
    :param name: the name of the model to fetch
    :return: the model instance
    """
    assert name == 'resnet50_robust_l2_eps1'

    model = resnet50()
    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                    relative_path="resnet50_robust_l2_eps1/resnet50_l2_eps1.ckpt",
                                    version_id="null",
                                    sha1="c75d68b7509f9d3829663ca3b627d4265fa9f588")
    sd = torch.load(weights_path, map_location=torch.device('cpu'))
    sd_processed = {}
    for k, v in sd['model'].items():
        # keep only the classifier branch and strip its module prefix from each key
        if ('attacker' not in k) and ('model' in k):
            sd_processed[k[13:]] = v
    model.load_state_dict(sd_processed)
    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    wrapper = PytorchWrapper(identifier='resnet50_robust_l2_eps1', model=model, preprocessing=preprocessing)
    wrapper.image_size = 224
    return wrapper


def get_layers(name):
    assert name == 'resnet50_robust_l2_eps1'
    """
    This method returns a list of string layer names to consider per model. The benchmarks map brain regions to
    layers and use this list as the set of candidate layers. The list doesn't have to contain all layers; the fewer
    layers, the faster the benchmarking process runs. Additionally, each listed layer has to produce an activations
    vector of at least size 25. The layer names are passed back to the model instance and have to be resolved there.
    For a pytorch model, layer names are, for instance, dot-concatenated per module, e.g. "features.2".
    :param name: the name of the model to return the layers for
    :return: a list of strings containing all layers that should be considered as brain areas.
    """
    return ['maxpool', 'layer1.0', 'layer1.1', 'layer1.2',
            'layer2.0', 'layer2.1', 'layer2.2', 'layer2.3',
            'layer3.0', 'layer3.1', 'layer3.2', 'layer3.3',
            'layer3.4', 'layer3.5', 'layer4.0', 'layer4.1', 'layer4.2', 'avgpool', 'fc']


def get_bibtex(model_identifier):
    """
    A method returning the bibtex reference of the requested model as a string.
    """
    return ''


if __name__ == '__main__':
    # Use this method to ensure the correctness of the BaseModel implementations.
    # It executes a mock run of brain-score benchmarks.
    check_models.check_base_models(__name__)
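# Illustrative note, not part of the changeset. The 'module.model.' prefix that
# k[13:] strips is exactly 13 characters long; assuming checkpoints keyed that way
# (as in the robustness library, where 'attacker.*' holds the adversarial wrapper),
# an equivalent, more explicit filter would be:
#
# _prefix = 'module.model.'
# sd_processed = {k[len(_prefix):]: v for k, v in sd['model'].items()
#                 if k.startswith(_prefix) and 'attacker' not in k}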
brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json
ADDED
@@ -0,0 +1 @@
{"IT": "layer4.0", "V4": "layer2.1", "V1": "layer2.3", "V2": "layer3.0"}
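# Illustrative note, not part of the changeset. The region_layer_map JSON commits each
# cortical region benchmark to a fixed layer (e.g. IT is read out from layer4.0), so
# the ModelCommitment can skip its layer search for this model.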
@@ -0,0 +1,8 @@
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from brainscore_vision import model_registry
from .model import get_layers, get_model


model_registry['resnet50_robust_l2_eps3'] = \
    lambda: ModelCommitment(identifier='resnet50_robust_l2_eps3', activations_model=get_model('resnet50_robust_l2_eps3'), layers=get_layers('resnet50_robust_l2_eps3'))