brainscore-vision 2.2.4__py3-none-any.whl → 2.2.5__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
brainscore_vision/models/imagenet_l2_3_0/model.py
@@ -0,0 +1,101 @@
+import functools
+import dill
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from robustness.model_utils import DummyModel
+from robustness.attacker import AttackerModel
+from robustness.datasets import DATASETS
+from brainscore_vision.model_helpers.s3 import load_weight_file
+from brainscore_vision.model_helpers.check_submission import check_models
+import torch as ch
+import os
+
+def make_and_restore_model(*_, arch, dataset, resume_path=None,
+                           parallel=False, pytorch_pretrained=False, add_custom_forward=False):
+    """
+    Makes a model and (optionally) restores it from a checkpoint.
+
+    Args:
+        arch (str|nn.Module): Model architecture identifier or otherwise a
+            torch.nn.Module instance with the classifier
+        dataset (Dataset class [see datasets.py])
+        resume_path (str): optional path to checkpoint saved with the
+            robustness library (ignored if ``arch`` is not a string)
+            not a string
+        parallel (bool): if True, wrap the model in a DataParallel
+            (defaults to False)
+        pytorch_pretrained (bool): if True, try to load a standard-trained
+            checkpoint from the torchvision library (throw error if failed)
+        add_custom_forward (bool): ignored unless arch is an instance of
+            nn.Module (and not a string). Normally, architectures should have a
+            forward() function which accepts arguments ``with_latent``,
+            ``fake_relu``, and ``no_relu`` to allow for adversarial manipulation
+            (see `here`<https://robustness.readthedocs.io/en/latest/example_usage/training_lib_part_2.html#training-with-custom-architectures>
+            for more info). If this argument is True, then these options will
+            not be passed to forward(). (Useful if you just want to train a
+            model and don't care about these arguments, and are passing in an
+            arch that you don't want to edit forward() for, e.g. a pretrained model)
+    Returns:
+        A tuple consisting of the model (possibly loaded with checkpoint), and the checkpoint itself
+    """
+    if (not isinstance(arch, str)) and add_custom_forward:
+        arch = DummyModel(arch)
+
+    classifier_model = dataset.get_model(arch, pytorch_pretrained) if \
+        isinstance(arch, str) else arch
+
+    model = AttackerModel(classifier_model, dataset)
+
+    # optionally resume from a checkpoint
+    checkpoint = None
+    if resume_path and os.path.isfile(resume_path):
+        print("=> loading checkpoint '{}'".format(resume_path))
+        checkpoint = ch.load(resume_path, pickle_module=dill,map_location="cpu")
+
+        # Makes us able to load models saved with legacy versions
+        state_dict_path = 'model'
+        if not ('model' in checkpoint):
+            state_dict_path = 'state_dict'
+
+        sd = checkpoint[state_dict_path]
+        sd = {k[len('module.'):]:v for k,v in sd.items()}
+        model.load_state_dict(sd)
+        print("=> loaded checkpoint '{}' (epoch {})".format(resume_path, checkpoint['epoch']))
+    elif resume_path:
+        error_msg = "=> no checkpoint found at '{}'".format(resume_path)
+        raise ValueError(error_msg)
+
+    if parallel:
+        model = ch.nn.DataParallel(model)
+
+    return model, checkpoint
+
+def get_model(name):
+    assert name == "imagenet_l2_3_0"
+    data_path = "" #os.path.expandvars(args.data)
+    dataset = DATASETS['imagenet'](data_path)
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                    relative_path="imagenet_l2_3_0/imagenet_l2_3_0.pt",
+                                    version_id="null",
+                                    sha1="cc6e4441abc8ad6d2f4da5db84836e544bfb53fd")
+    model, _ = make_and_restore_model(arch='resnet50', dataset=dataset,
+                                      resume_path=weights_path)
+    model = model.model.eval()
+    # print(model)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    assert name == "imagenet_l2_3_0"
+    return ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'avgpool', 'fc']
+
+
+def get_bibtex(name):
+    return """ """
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
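The accompanying imagenet_l2_3_0/__init__.py (+9 lines in the listing above) is not reproduced in this excerpt. A hypothetical registration sketch, mirroring the pattern the other new plugins use below:

# Hypothetical sketch only -- the actual imagenet_l2_3_0/__init__.py is not shown here.
# It presumably registers the wrapper the same way the inception_v1 plugin below does.
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, get_layers

model_registry['imagenet_l2_3_0'] = lambda: ModelCommitment(
    identifier='imagenet_l2_3_0',
    activations_model=get_model('imagenet_l2_3_0'),
    layers=get_layers('imagenet_l2_3_0'))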
brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json
@@ -0,0 +1 @@
+{"V4": "layer2", "IT": "layer4", "V1": "layer2", "V2": "layer3"}
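For context, this region_layer_map file pins each brain region (V1, V2, V4, IT) to a fixed model layer, presumably so scoring does not have to re-run layer selection. A minimal sketch of reading the mapping (path taken from the listing above):

# Minimal sketch: load the committed region-to-layer mapping.
import json

with open('brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json') as f:
    region_layer_map = json.load(f)
print(region_layer_map['IT'])  # -> 'layer4'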
brainscore_vision/models/inception_v1/__init__.py
@@ -0,0 +1,7 @@
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision import model_registry
+from .model import get_layers,get_model
+
+
+model_registry['inception_v1'] = \
+    lambda: ModelCommitment(identifier='inception_v1', activations_model=get_model('inception_v1'), layers=get_layers('inception_v1'))
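Once registered, a plugin like this can be exercised through the top-level brainscore_vision API. A minimal sketch, assuming the public load_model and score entry points; the benchmark identifier below is only an example:

# Usage sketch (benchmark identifier is illustrative).
from brainscore_vision import load_model, score

model = load_model('inception_v1')  # lazily builds the ModelCommitment registered above
result = score(model_identifier='inception_v1', benchmark_identifier='MajajHong2015public.IT-pls')
print(result)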
@@ -0,0 +1,67 @@
+import functools
+import torch
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model(name):
+    """
+    This method fetches an instance of a base model. The instance has to be callable and return a xarray object,
+    containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
+    keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the
+    wrappers.
+    :param name: the name of the model to fetch
+    :return: the model instance
+    """
+    assert name == 'inception_v1'
+    preprocessing = functools.partial(load_preprocess_images, image_size=224, preprocess_type='inception')
+    model = torch.hub.load('pytorch/vision:v0.10.0', 'googlenet', pretrained=True)
+    wrapper = PytorchWrapper(identifier='inception_v1', model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'inception_v1'
+    layer_names = (['maxpool2'] +
+                   [f'inception3{i}' for i in ['a', 'b']] +
+                   [f'inception4{i}' for i in ['a', 'b', 'c', 'd', 'e']] +
+                   [f'inception5{i}' for i in ['a', 'b']] +
+                   ['avgpool'])
+    return layer_names
+
+
+def get_bibtex(name):
+    """
+    A method returning the bibtex reference of the requested model as a string.
+    """
+    return '''
+    @article{DBLP:journals/corr/SzegedyLJSRAEVR14,
+      author    = {Christian Szegedy and
+                   Wei Liu and
+                   Yangqing Jia and
+                   Pierre Sermanet and
+                   Scott E. Reed and
+                   Dragomir Anguelov and
+                   Dumitru Erhan and
+                   Vincent Vanhoucke and
+                   Andrew Rabinovich},
+      title     = {Going Deeper with Convolutions},
+      journal   = {CoRR},
+      volume    = {abs/1409.4842},
+      year      = {2014},
+      url       = {http://arxiv.org/abs/1409.4842},
+      eprinttype = {arXiv},
+      eprint    = {1409.4842},
+      timestamp = {Mon, 13 Aug 2018 16:48:52 +0200},
+      biburl    = {https://dblp.org/rec/journals/corr/SzegedyLJSRAEVR14.bib},
+      bibsource = {dblp computer science bibliography, https://dblp.org}
+    }
+    '''
+
+
+if __name__ == '__main__':
+    # Use this method to ensure the correctness of the BaseModel implementations.
+    # It executes a mock run of brain-score benchmarks.
+    check_models.check_base_models(__name__)
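Since get_layers above hard-codes GoogLeNet module names, a quick sanity check against the torch.hub model used in get_model can catch typos. A minimal sketch (the subset of layer names checked is illustrative, and weights are irrelevant for a name check):

import torch

# Hedged sketch: confirm the layer names exist as submodules of torchvision's googlenet.
model = torch.hub.load('pytorch/vision:v0.10.0', 'googlenet', pretrained=False)
module_names = {name for name, _ in model.named_modules()}
for layer in ['maxpool2', 'inception3a', 'inception4e', 'inception5b', 'avgpool']:
    assert layer in module_names, f"unknown layer: {layer}"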
@@ -0,0 +1 @@
+torch
@@ -2,6 +2,6 @@ from brainscore_vision import model_registry
 from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
 from .model import get_model, get_layers
 
-model_registry['
-    activations_model=get_model('
-    layers=get_layers('
+model_registry['inception_v3'] = lambda: ModelCommitment(identifier='inception_v3',
+                                                         activations_model=get_model('inception_v3'),
+                                                         layers=get_layers('inception_v3'))
@@ -21,9 +21,9 @@ implementation.
 MODEL = timm.create_model('inception_v3', pretrained=True)
 
 def get_model(name):
-    assert name == '
-    preprocessing = functools.partial(load_preprocess_images, image_size=299)
-    wrapper = PytorchWrapper(identifier='
+    assert name == 'inception_v3'
+    preprocessing = functools.partial(load_preprocess_images, image_size=299, preprocess_type='inception')
+    wrapper = PytorchWrapper(identifier='inception_v3', model=MODEL,
                              preprocessing=preprocessing,
                              batch_size=4)  # doesn't fit into 12 GB GPU memory otherwise
     wrapper.image_size = 299
@@ -31,13 +31,13 @@ def get_model(name):
 
 
 def get_layers(name):
-    assert name == '
-    layer_names = []
-
-
-
-
-    return layer_names
+    assert name == 'inception_v3'
+    layer_names = (['Conv2d_1a_3x3', 'Pool1', 'Pool2'] +
+                   [f'Mixed_5{i}' for i in ['b', 'c', 'd']] +
+                   [f'Mixed_6{i}' for i in ['a', 'b', 'c', 'd', 'e']] +
+                   [f'Mixed_7{i}' for i in ['a', 'b', 'c']] +
+                   ['global_pool'])
+    return layer_names
 
 
 def get_bibtex(model_identifier):
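The rewritten get_layers now names timm's inception_v3 modules directly. A minimal sketch of verifying a few of those names against the instantiated model (weights are not needed for the check):

import timm

# Hedged sketch: check the new layer names against timm's module tree.
model = timm.create_model('inception_v3', pretrained=False)
module_names = dict(model.named_modules())
for layer in ['Conv2d_1a_3x3', 'Pool1', 'Pool2', 'Mixed_5b', 'Mixed_6e', 'Mixed_7c', 'global_pool']:
    assert layer in module_names, f"{layer} not found in timm inception_v3"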
@@ -2,6 +2,6 @@ from brainscore_vision import model_registry
 from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
 from .model import get_model, get_layers
 
-model_registry['
-    activations_model=get_model('
-    layers=get_layers('
+model_registry['inception_v4'] = lambda: ModelCommitment(identifier='inception_v4',
+                                                         activations_model=get_model('inception_v4'),
+                                                         layers=get_layers('inception_v4'))
@@ -12,6 +12,7 @@ This is a Pytorch implementation of inception_v4.
 
 Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via:
 https://huggingface.co/docs/timm/en/models/inception-v4
+https://huggingface.co/timm/inception_v4.tf_in1k
 
 Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow
 implementation.
@@ -19,12 +20,12 @@ implementation.
 '''
 
 
-MODEL = timm.create_model('inception_v4', pretrained=True)
+MODEL = timm.create_model('inception_v4.tf_in1k', pretrained=True)
 
 def get_model(name):
-    assert name == '
-    preprocessing = functools.partial(load_preprocess_images, image_size=299)
-    wrapper = PytorchWrapper(identifier='
+    assert name == 'inception_v4'
+    preprocessing = functools.partial(load_preprocess_images, image_size=299, preprocess_type='inception')
+    wrapper = PytorchWrapper(identifier='inception_v4', model=MODEL,
                              preprocessing=preprocessing,
                              batch_size=4)  # doesn't fit into 12 GB GPU memory otherwise
     wrapper.image_size = 299
@@ -32,17 +33,9 @@ def get_model(name):
 
 
 def get_layers(name):
-    assert name == '
-
-
-    layers += ['Mixed_3a']
-    layers += ['Mixed_4a']
-    layers += [f'Mixed_5{i}' for i in ['a', 'b', 'c', 'd', 'e']]
-    layers += [f'Mixed_6{i}' for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']]
-    layers += [f'Mixed_7{i}' for i in ['a', 'b', 'c', 'd']]
-    layers += ['global_pool']
-
-    return layers
+    assert name == 'inception_v4'
+    layer_names = ['features.0.conv'] + [f'features.{i}' for i in range(1, 22)] + ['global_pool']
+    return layer_names
 
 
 def get_bibtex(model_identifier):
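For inception_v4 the diff switches from the old Mixed_* names to indices into timm's sequential `features` container, where index 0 appears to be the stem convolution (hence 'features.0.conv') and the remaining indices cover the other stem, Inception and Reduction blocks. A minimal inspection sketch (weights are not needed):

import timm

# Hedged sketch: inspect the features container the new layer names index into.
model = timm.create_model('inception_v4.tf_in1k', pretrained=False)
print(len(model.features))  # expected 22 blocks, i.e. indices 0..21 as in get_layers above
print(type(model.features[0]).__name__, type(model.features[21]).__name__)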
@@ -0,0 +1,7 @@
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision import model_registry
+from .model import get_layers, get_model
+
+
+model_registry['mobilenet_v2_0_5_192'] = \
+    lambda: ModelCommitment(identifier='mobilenet_v2_0_5_192', activations_model=get_model('mobilenet_v2_0_5_192'), layers=get_layers('mobilenet_v2_0_5_192'))
@@ -0,0 +1,83 @@
+import functools
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.s3 import load_weight_file
+import torch
+import imp
+
+model_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                              relative_path="mobilenet_v2_0.5_192/mobilenet_v2_0.py",
+                              version_id="null",
+                              sha1="d5c7af8768f9f2475367ac1e48e204cc5cf004a0")
+model_weight_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                     relative_path="mobilenet_v2_0.5_192/mobilenet_v2_0.5_192_frozen.pth",
+                                     version_id="null",
+                                     sha1="e5aa083caa4833fccd48af0c578a45064824dd7f")
+MainModel = imp.load_source('MainModel', model_path.as_posix())
+
+
+# This custom wrapper handles background class removal, and is used in related mobilenets
+class MobilenetPytorchWrapper(PytorchWrapper):
+    def __call__(self, *args, **kwargs):
+        result = super().__call__(*args, **kwargs)  # retrieve original output
+        if 'logits' in kwargs.get('layers', []):
+            result = result.isel(neuroid=slice(1, None))  # remove background class in last layer
+        return result
+
+
+def get_model(name):
+    """
+    This method fetches an instance of a base model. The instance has to be callable and return a xarray object,
+    containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
+    keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the
+    wrappers.
+    :param name: the name of the model to fetch
+    :return: the model instance
+    """
+    assert name == 'mobilenet_v2_0_5_192'
+    model = torch.load(model_weight_path.as_posix(), weights_only=False)
+    preprocessing = functools.partial(load_preprocess_images, image_size=192, preprocess_type='inception')
+    wrapper = MobilenetPytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 192
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'mobilenet_v2_0_5_192'
+    layer_names = (['MobilenetV2_Conv_Conv2D'] +
+                   [f'MobilenetV2_expanded_conv_{i}_expand_Conv2D' for i in range(1, 17)] +
+                   ['MobilenetV2_Conv_1_Conv2D'])
+    return layer_names
+
+
+def get_bibtex(name):
+    """
+    A method returning the bibtex reference of the requested model as a string.
+    """
+    return '''
+    @article{DBLP:journals/corr/abs-1801-04381,
+      author    = {Mark Sandler and
+                   Andrew G. Howard and
+                   Menglong Zhu and
+                   Andrey Zhmoginov and
+                   Liang{-}Chieh Chen},
+      title     = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification,
+                   Detection and Segmentation},
+      journal   = {CoRR},
+      volume    = {abs/1801.04381},
+      year      = {2018},
+      url       = {http://arxiv.org/abs/1801.04381},
+      eprinttype = {arXiv},
+      eprint    = {1801.04381},
+      timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
+      biburl    = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib},
+      bibsource = {dblp computer science bibliography, https://dblp.org}
+    }
+    '''
+
+
+if __name__ == '__main__':
+    # Use this method to ensure the correctness of the BaseModel implementations.
+    # It executes a mock run of brain-score benchmarks.
+    check_models.check_base_models(__name__)
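The custom wrapper above trims the first neuroid from the 'logits' layer because the TF-Slim export keeps a background class at index 0. A minimal sketch of the same slicing on a toy xarray assembly:

import numpy as np
import xarray as xr

# Hedged sketch: dropping the leading background unit, as MobilenetPytorchWrapper does for 'logits'.
logits = xr.DataArray(np.random.rand(2, 1001), dims=('presentation', 'neuroid'))
without_background = logits.isel(neuroid=slice(1, None))
print(without_background.sizes['neuroid'])  # 1000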
@@ -0,0 +1,7 @@
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision import model_registry
+from .model import get_layers, get_model
+
+
+model_registry['mobilenet_v2_0_5_224'] = \
+    lambda: ModelCommitment(identifier='mobilenet_v2_0_5_224', activations_model=get_model('mobilenet_v2_0_5_224'), layers=get_layers('mobilenet_v2_0_5_224'))
@@ -0,0 +1,73 @@
+import functools
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.s3 import load_weight_file
+from brainscore_vision.models.mobilenet_v2_0_5_192.model import MobilenetPytorchWrapper
+import torch
+import imp
+
+model_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                              relative_path="mobilenet_v2_0.5_224/mobilenet_v2_0.py",
+                              version_id="null",
+                              sha1="51ead4ba3605cdc9b5adc117e14def601288dd94")
+model_weight_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                     relative_path="mobilenet_v2_0.5_224/mobilenet_v2_0.5_224_frozen.pth",
+                                     version_id="null",
+                                     sha1="649501eadcf01f871bdb2265aa7dcac80594160a")
+MainModel = imp.load_source('MainModel', model_path.as_posix())
+
+def get_model(name):
+    """
+    This method fetches an instance of a base model. The instance has to be callable and return a xarray object,
+    containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
+    keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the
+    wrappers.
+    :param name: the name of the model to fetch
+    :return: the model instance
+    """
+    assert name == 'mobilenet_v2_0_5_224'
+    model = torch.load(model_weight_path.as_posix(), weights_only=False)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224, preprocess_type='inception')
+    wrapper = MobilenetPytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'mobilenet_v2_0_5_224'
+    layer_names = (['MobilenetV2_Conv_Conv2D'] +
+                   [f'MobilenetV2_expanded_conv_{i}_expand_Conv2D' for i in range(1, 17)] +
+                   ['MobilenetV2_Conv_1_Conv2D'])
+    return layer_names
+
+
+def get_bibtex(name):
+    """
+    A method returning the bibtex reference of the requested model as a string.
+    """
+    return '''
+    @article{DBLP:journals/corr/abs-1801-04381,
+      author    = {Mark Sandler and
+                   Andrew G. Howard and
+                   Menglong Zhu and
+                   Andrey Zhmoginov and
+                   Liang{-}Chieh Chen},
+      title     = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification,
+                   Detection and Segmentation},
+      journal   = {CoRR},
+      volume    = {abs/1801.04381},
+      year      = {2018},
+      url       = {http://arxiv.org/abs/1801.04381},
+      eprinttype = {arXiv},
+      eprint    = {1801.04381},
+      timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
+      biburl    = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib},
+      bibsource = {dblp computer science bibliography, https://dblp.org}
+    }
+    '''
+
+
+if __name__ == '__main__':
+    # Use this method to ensure the correctness of the BaseModel implementations.
+    # It executes a mock run of brain-score benchmarks.
+    check_models.check_base_models(__name__)
@@ -0,0 +1,7 @@
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision import model_registry
+from .model import get_layers, get_model
+
+
+model_registry['mobilenet_v2_0_75_160'] = \
+    lambda: ModelCommitment(identifier='mobilenet_v2_0_75_160', activations_model=get_model('mobilenet_v2_0_75_160'), layers=get_layers('mobilenet_v2_0_75_160'))
@@ -0,0 +1,74 @@
+import functools
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.s3 import load_weight_file
+from brainscore_vision.models.mobilenet_v2_0_5_192.model import MobilenetPytorchWrapper
+import torch
+import imp
+
+model_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                              relative_path="mobilenet_v2_0.75_160/mobilenet_v2_0.py",
+                              version_id="null",
+                              sha1="11bd61b5e71962073072c0dadb252a262ae68579")
+model_weight_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                     relative_path="mobilenet_v2_0.75_160/mobilenet_v2_0.75_160_frozen.pth",
+                                     version_id="null",
+                                     sha1="9fc6f5e9864d524760c6e1dc8aa5702415457df4")
+MainModel = imp.load_source('MainModel', model_path.as_posix())
+
+
+def get_model(name):
+    """
+    This method fetches an instance of a base model. The instance has to be callable and return a xarray object,
+    containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
+    keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the
+    wrappers.
+    :param name: the name of the model to fetch
+    :return: the model instance
+    """
+    assert name == 'mobilenet_v2_0_75_160'
+    model = torch.load(model_weight_path.as_posix(), weights_only=False)
+    preprocessing = functools.partial(load_preprocess_images, image_size=160, preprocess_type='inception')
+    wrapper = MobilenetPytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 160
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'mobilenet_v2_0_75_160'
+    layer_names = (['MobilenetV2_Conv_Conv2D'] +
+                   [f'MobilenetV2_expanded_conv_{i}_expand_Conv2D' for i in range(1, 17)] +
+                   ['MobilenetV2_Conv_1_Conv2D'])
+    return layer_names
+
+
+def get_bibtex(name):
+    """
+    A method returning the bibtex reference of the requested model as a string.
+    """
+    return '''
+    @article{DBLP:journals/corr/abs-1801-04381,
+      author    = {Mark Sandler and
+                   Andrew G. Howard and
+                   Menglong Zhu and
+                   Andrey Zhmoginov and
+                   Liang{-}Chieh Chen},
+      title     = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification,
+                   Detection and Segmentation},
+      journal   = {CoRR},
+      volume    = {abs/1801.04381},
+      year      = {2018},
+      url       = {http://arxiv.org/abs/1801.04381},
+      eprinttype = {arXiv},
+      eprint    = {1801.04381},
+      timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
+      biburl    = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib},
+      bibsource = {dblp computer science bibliography, https://dblp.org}
+    }
+    '''
+
+
+if __name__ == '__main__':
+    # Use this method to ensure the correctness of the BaseModel implementations.
+    # It executes a mock run of brain-score benchmarks.
+    check_models.check_base_models(__name__)