brainscore-vision 2.2.4__py3-none-any.whl → 2.2.6__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,544 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from functools import reduce
import scipy.stats as stats

device = "cpu"


def gabor_kernel(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61):
    # Build a ks x ks Gabor filter: an oriented sinusoid windowed by a Gaussian envelope.
    w = ks // 2
    grid_val = torch.arange(-w, w + 1, dtype=torch.float)
    x, y = torch.meshgrid(grid_val, grid_val)
    rotx = x * np.cos(theta) + y * np.sin(theta)
    roty = -x * np.sin(theta) + y * np.cos(theta)
    g = torch.zeros(y.shape)
    g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
    g /= 2 * np.pi * sigma_x * sigma_y
    g *= torch.cos(2 * np.pi * frequency * rotx + offset)

    return g


def sample_dist(hist, bins, ns, scale='linear'):
    # Draw ns samples from a binned histogram by inverse-CDF interpolation.
    rand_sample = np.random.rand(ns)
    if scale == 'linear':
        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins)
    elif scale == 'log2':
        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins))
        rand_sample = 2 ** rand_sample
    elif scale == 'log10':
        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins))
        rand_sample = 10 ** rand_sample
    return rand_sample


def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0):
    # Generates random sample
    np.random.seed(seed)

    phase_bins = np.array([0, 360])
    phase_dist = np.array([1])

    if rand_flag:
        print('Uniform gabor parameters')
        ori_bins = np.array([0, 180])
        ori_dist = np.array([1])

        nx_bins = np.array([0.1, 10 ** 0.2])
        nx_dist = np.array([1])

        ny_bins = np.array([0.1, 10 ** 0.2])
        ny_dist = np.array([1])

        # sf_bins = np.array([0.5, 8])
        # sf_dist = np.array([1])

        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1])

        sfmax_ind = np.where(sf_bins < sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]

        sf_bins = sf_bins[sfmin_ind:sfmax_ind + 1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]

        sf_dist = sf_dist / sf_dist.sum()
    else:
        print('Neuronal distributions gabor parameters')
        # DeValois 1982a
        ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
        ori_dist = np.array([66, 49, 77, 54])
        ori_dist = ori_dist / ori_dist.sum()

        # Schiller 1976
        cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])

        # Ringach 2002b
        nx_bins = np.logspace(-1, 0.2, 6, base=10)
        ny_bins = np.logspace(-1, 0.2, 6, base=10)
        n_joint_dist = np.array([[2., 0., 1., 0., 0.],
                                 [8., 9., 4., 1., 0.],
                                 [1., 2., 19., 17., 3.],
                                 [0., 0., 1., 7., 4.],
                                 [0., 0., 0., 0., 0.]])
        n_joint_dist = n_joint_dist / n_joint_dist.sum()
        nx_dist = n_joint_dist.sum(axis=1)
        nx_dist = nx_dist / nx_dist.sum()
        ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)

        # DeValois 1982b
        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12])

        sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]

        sf_bins = sf_bins[sfmin_ind:sfmax_ind + 1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]

        sf_dist = sf_dist / sf_dist.sum()

    phase = sample_dist(phase_dist, phase_bins, features)
    ori = sample_dist(ori_dist, ori_bins, features)
    ori[ori < 0] = ori[ori < 0] + 180

    if rand_flag:
        sf = sample_dist(sf_dist, sf_bins, features, scale='log2')
        nx = sample_dist(nx_dist, nx_bins, features, scale='log10')
        ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
    else:
        samps = np.random.multivariate_normal([0, 0], cov_mat, features)
        samps_cdf = stats.norm.cdf(samps)

        nx = np.interp(samps_cdf[:, 0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
        nx = 10 ** nx

        ny_samp = np.random.rand(features)
        ny = np.zeros(features)
        for samp_ind, nx_samp in enumerate(nx):
            bin_id = np.argwhere(nx_bins < nx_samp)[-1]
            ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
                                     np.log10(ny_bins))
        ny = 10 ** ny

        sf = np.interp(samps_cdf[:, 1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins))
        sf = 2 ** sf

    return sf, ori, phase, nx, ny

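# --- Illustrative aside (not part of the packaged file): sample_dist above is
# plain inverse-CDF sampling over histogram bins, so a toy sanity check,
# assuming the imports at the top of this file, looks like:
#
#   hist = np.array([0.25, 0.75])            # bin masses, summing to 1
#   bins = np.array([0.5, 1.0, 2.0])         # bin edges
#   draws = sample_dist(hist, bins, 100000, scale='log2')
#   # roughly 25% of draws land in [0.5, 1.0) and 75% in [1.0, 2.0)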
class Identity(nn.Module):
|
136
|
+
def forward(self, x):
|
137
|
+
return x
|
138
|
+
|
139
|
+
|
140
|
+
class GFB(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=4):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size)
        self.stride = (stride, stride)
        self.padding = (kernel_size // 2, kernel_size // 2)

        # Parameter instantiation: the fixed Gabor weights are filled in by initialize()
        self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))

    def forward(self, x):
        return F.conv2d(x, self.weight, None, self.stride, self.padding)

    def initialize(self, sf, theta, sigx, sigy, phase):
        random_channel = torch.randint(0, self.in_channels, (self.out_channels,))
        for i in range(self.out_channels):
            self.weight[i, random_channel[i]] = gabor_kernel(frequency=sf[i], sigma_x=sigx[i], sigma_y=sigy[i],
                                                             theta=theta[i], offset=phase[i], ks=self.kernel_size[0])
        self.weight = nn.Parameter(self.weight, requires_grad=False)

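# Illustrative sketch (not part of the original file): building a tiny fixed Gabor filter
# bank by hand. The parameter values are placeholders chosen only so the shapes line up;
# gabor_kernel is the helper defined earlier in this module.
def _demo_gfb():
    fb = GFB(in_channels=3, out_channels=4, kernel_size=25, stride=4)
    fb.initialize(sf=np.full(4, 0.1), theta=np.zeros(4),
                  sigx=np.full(4, 3.0), sigy=np.full(4, 3.0), phase=np.zeros(4))
    out = fb(torch.rand(1, 3, 224, 224))
    # kernel 25, padding 12, stride 4: 224 -> 56
    assert out.shape == (1, 4, 56, 56)
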
class VOneBlock(nn.Module):
    def __init__(self, sf, theta, sigx, sigy, phase,
                 k_exc=25, noise_mode=None, noise_scale=1, noise_level=1,
                 simple_channels=128, complex_channels=128, ksize=25, stride=4, input_size=224):
        super().__init__()

        self.in_channels = 3

        self.simple_channels = simple_channels
        self.complex_channels = complex_channels
        self.out_channels = simple_channels + complex_channels
        self.stride = stride
        self.input_size = input_size

        self.sf = sf
        self.theta = theta
        self.sigx = sigx
        self.sigy = sigy
        self.phase = phase
        self.k_exc = k_exc

        self.set_noise_mode(noise_mode, noise_scale, noise_level)
        self.fixed_noise = None

        self.simple_conv_q0 = GFB(self.in_channels, self.out_channels, ksize, stride)
        self.simple_conv_q1 = GFB(self.in_channels, self.out_channels, ksize, stride)
        self.simple_conv_q0.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
                                       phase=self.phase)
        self.simple_conv_q1.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
                                       phase=self.phase + np.pi / 2)

        self.simple = nn.ReLU(inplace=True)
        self.complex = Identity()
        self.gabors = Identity()
        self.noise = nn.ReLU(inplace=True)
        self.output = Identity()

    def forward(self, x):
        # Gabor activations [Batch, out_channels, H/stride, W/stride]
        x = self.gabors_f(x)
        # Noise [Batch, out_channels, H/stride, W/stride]
        x = self.noise_f(x)
        # V1 Block output: (Batch, out_channels, H/stride, W/stride)
        x = self.output(x)
        return x

    def gabors_f(self, x):
        s_q0 = self.simple_conv_q0(x)
        s_q1 = self.simple_conv_q1(x)
        # complex cells: energy of the two quadrature-phase filter banks
        c = self.complex(torch.sqrt(s_q0[:, self.simple_channels:, :, :] ** 2 +
                                    s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2))
        s = self.simple(s_q0[:, 0:self.simple_channels, :, :])
        return self.gabors(self.k_exc * torch.cat((s, c), 1))

    def noise_f(self, x):
        if self.noise_mode == 'neuronal':
            # Poisson-like noise: the noise std grows with the (scaled, shifted) activation
            eps = 10e-5
            x *= self.noise_scale
            x += self.noise_level
            if self.fixed_noise is not None:
                x += self.fixed_noise * torch.sqrt(F.relu(x.clone()) + eps)
            else:
                x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * \
                     torch.sqrt(F.relu(x.clone()) + eps)
            x -= self.noise_level
            x /= self.noise_scale
        if self.noise_mode == 'gaussian':
            if self.fixed_noise is not None:
                x += self.fixed_noise * self.noise_scale
            else:
                x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * self.noise_scale
        return self.noise(x)

    def set_noise_mode(self, noise_mode=None, noise_scale=1, noise_level=1):
        self.noise_mode = noise_mode
        self.noise_scale = noise_scale
        self.noise_level = noise_level

    def fix_noise(self, batch_size=256, seed=None):
        noise_mean = torch.zeros(batch_size, self.out_channels, int(self.input_size / self.stride),
                                 int(self.input_size / self.stride))
        if seed is not None:  # honor seed=0 as well
            torch.manual_seed(seed)
        if self.noise_mode:
            self.fixed_noise = torch.distributions.normal.Normal(noise_mean, scale=1).rsample().to(device)

    def unfix_noise(self):
        self.fixed_noise = None

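# Illustrative sketch (not part of the original file): a forward pass through the V1 front
# end with parameters drawn by generate_gabor_param. The unit conversions mirror the ones
# performed inside VOneNet() below; the 64-pixel / 2-degree sizes are placeholders.
def _demo_vone_block():
    sf, theta, phase, nx, ny = generate_gabor_param(8, 0, False, 0.75, 9, 0)
    ppd = 64 / 2  # pixels per degree
    sf = sf / ppd
    sigx, sigy = nx / sf, ny / sf
    theta, phase = theta / 180 * np.pi, phase / 180 * np.pi
    block = VOneBlock(sf=sf, theta=theta, sigx=sigx, sigy=sigy, phase=phase,
                      simple_channels=4, complex_channels=4, ksize=25, stride=4, input_size=64)
    out = block(torch.rand(2, 3, 64, 64))  # noise_mode=None by default
    assert out.shape == (2, 8, 16, 16)
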
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)

class Bottleneck(nn.Module):
    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out

def VOneNet(sf_corr=0.75, sf_max=9, sf_min=0, rand_param=False, gabor_seed=0,
            simple_channels=256, complex_channels=256,
            noise_mode='neuronal', noise_scale=0.35, noise_level=0.07, k_exc=25,
            model_arch='resnet50', image_size=224, visual_degrees=8, ksize=25, stride=4):
    out_channels = simple_channels + complex_channels

    sf, theta, phase, nx, ny = generate_gabor_param(out_channels, gabor_seed, rand_param, sf_corr, sf_max, sf_min)

    gabor_params = {'simple_channels': simple_channels, 'complex_channels': complex_channels, 'rand_param': rand_param,
                    'gabor_seed': gabor_seed, 'sf_max': sf_max, 'sf_corr': sf_corr, 'sf': sf.copy(),
                    'theta': theta.copy(), 'phase': phase.copy(), 'nx': nx.copy(), 'ny': ny.copy()}
    arch_params = {'k_exc': k_exc, 'arch': model_arch, 'ksize': ksize, 'stride': stride}

    # Unit conversions: cycles/degree -> cycles/pixel, degrees -> radians
    ppd = image_size / visual_degrees

    sf = sf / ppd
    sigx = nx / sf
    sigy = ny / sf
    theta = theta / 180 * np.pi
    phase = phase / 180 * np.pi

    vone_block = VOneBlock(sf=sf, theta=theta, sigx=sigx, sigy=sigy, phase=phase,
                           k_exc=k_exc, noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level,
                           simple_channels=simple_channels, complex_channels=complex_channels,
                           ksize=ksize, stride=stride, input_size=image_size)

    bottleneck = nn.Conv2d(out_channels, 64, kernel_size=1, stride=1, bias=False)
    nn.init.kaiming_normal_(bottleneck.weight, mode='fan_out', nonlinearity='relu')

    return vone_block, bottleneck

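# Illustrative sketch (not part of the original file): the two modules returned by VOneNet()
# are meant to be chained in front of a backbone, exactly as get_model() in model.py does.
# noise_mode=None keeps the check deterministic; shapes assume the 224-pixel default.
def _demo_vonenet_front_end():
    vone_block, bottleneck = VOneNet(noise_mode=None)
    x = vone_block(torch.rand(1, 3, 224, 224))  # -> (1, 512, 56, 56)
    x = bottleneck(x)  # 1x1 conv down to the 64 channels the GRCNN backend expects
    assert x.shape == (1, 64, 56, 56)
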
class SKConv(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32, groups=32):
        super(SKConv, self).__init__()
        d = max(in_channels // r, L)
        self.M = M
        self.out_channels = out_channels
        self.conv = nn.ModuleList()
        for i in range(M):
            # M parallel branches with increasing dilation (selective-kernel design)
            conv1 = nn.Conv2d(in_channels, out_channels, 3, stride, padding=1 + i, dilation=1 + i, groups=groups,
                              bias=False)
            init.kaiming_normal_(conv1.weight)
            self.conv.append(nn.Sequential(conv1,
                                           nn.BatchNorm2d(out_channels),
                                           nn.ReLU(inplace=True)))
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        conv_fc = nn.Conv2d(out_channels, d, 1, bias=False)
        init.normal_(conv_fc.weight, std=0.01)
        self.fc1 = nn.Sequential(conv_fc,
                                 nn.BatchNorm2d(d),
                                 nn.ReLU(inplace=True))
        self.fc2 = nn.Conv2d(d, out_channels * M, 1, 1, bias=False)
        init.normal_(self.fc2.weight, std=0.01)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input):
        batch_size = input.size(0)
        output = []
        for i, conv in enumerate(self.conv):
            output.append(conv(input))
        # fuse the branches, then compute per-channel attention over the M branches
        U = reduce(lambda x, y: x + y, output)
        s = self.global_pool(U)
        z = self.fc1(s)
        a_b = self.fc2(z)
        a_b = a_b.reshape(batch_size, self.M, self.out_channels, -1)
        a_b = self.softmax(a_b)
        a_b = list(a_b.chunk(self.M, dim=1))
        a_b = list(map(lambda x: x.reshape(batch_size, self.out_channels, 1, 1), a_b))
        V = list(map(lambda x, y: x * y, output, a_b))
        V = reduce(lambda x, y: x + y, V)
        return V

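# Illustrative sketch (not part of the original file): SKConv preserves the spatial size at
# stride=1 and mixes its M dilated branches with learned per-channel attention weights.
# The channel count is a placeholder; groups=32 requires it to be divisible by 32.
def _demo_skconv():
    sk = SKConv(in_channels=64, out_channels=64)
    out = sk(torch.rand(1, 64, 28, 28))
    assert out.shape == (1, 64, 28, 28)
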
class GRCL(nn.Module):
    def __init__(self, inplanes, planes, downsample=True, iter=3, SKconv=True, expansion=2):
        super(GRCL, self).__init__()

        self.iter = iter
        self.expansion = expansion
        # feed-forward part
        self.add_module('bn_f', nn.BatchNorm2d(inplanes))
        self.add_module('relu_f', nn.ReLU(inplace=True))
        conv_f = nn.Conv2d(inplanes, int(planes * self.expansion), kernel_size=3, stride=1, padding=1, bias=False,
                           groups=32)
        init.kaiming_normal_(conv_f.weight)
        self.add_module('conv_f', conv_f)

        self.add_module('bn_g_f', nn.BatchNorm2d(inplanes))
        self.add_module('relu_g_f', nn.ReLU(inplace=True))
        conv_g_f = nn.Conv2d(inplanes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=True,
                             groups=32)
        init.normal_(conv_g_f.weight, std=0.01)
        self.add_module('conv_g_f', conv_g_f)
        self.conv_g_r = nn.Conv2d(int(planes * self.expansion), int(planes * self.expansion), kernel_size=1, stride=1,
                                  padding=0, bias=False, groups=32)
        self.add_module('sig', nn.Sigmoid())

        # recurrent part
        for i in range(0, self.iter):
            layers = []
            layers_g_bn = []

            layers.append(nn.BatchNorm2d(planes * self.expansion))
            layers.append(nn.ReLU(inplace=True))
            conv_1 = nn.Conv2d(int(planes * self.expansion), planes, kernel_size=1, stride=1, padding=0, bias=False)
            init.kaiming_normal_(conv_1.weight)
            layers.append(conv_1)

            layers.append(nn.BatchNorm2d(planes))
            layers.append(nn.ReLU(inplace=True))

            if SKconv:
                layers.append(SKConv(planes, planes))
            else:
                layers.append(nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False))
                layers.append(nn.BatchNorm2d(planes))
                layers.append(nn.ReLU(inplace=True))

            conv_2 = nn.Conv2d(planes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
            init.kaiming_normal_(conv_2.weight)
            layers.append(conv_2)
            layers_g_bn.append(nn.BatchNorm2d(int(planes * self.expansion)))

            layers_g_bn.append(nn.ReLU(inplace=True))

            self.add_module('iter_' + str(i + 1), nn.Sequential(*layers))
            self.add_module('iter_g_' + str(i + 1), nn.Sequential(*layers_g_bn))

        self.downsample = downsample
        if self.downsample:
            self.add_module('d_bn', nn.BatchNorm2d(planes * self.expansion))
            self.add_module('d_relu', nn.ReLU(inplace=True))
            d_conv = nn.Conv2d(int(planes * self.expansion), int(planes * self.expansion), kernel_size=1, stride=1,
                               padding=0, bias=False)
            init.kaiming_normal_(d_conv.weight)
            self.add_module('d_conv', d_conv)
            self.add_module('d_ave', nn.AvgPool2d((2, 2), stride=2))

            self.add_module('d_bn_1', nn.BatchNorm2d(planes * self.expansion))
            self.add_module('d_relu_1', nn.ReLU(inplace=True))
            d_conv_1 = nn.Conv2d(int(planes * self.expansion), planes, kernel_size=1, stride=1, padding=0,
                                 bias=False)
            init.kaiming_normal_(d_conv_1.weight)
            self.add_module('d_conv_1', d_conv_1)

            self.add_module('d_bn_3', nn.BatchNorm2d(planes))
            self.add_module('d_relu_3', nn.ReLU(inplace=True))

            if SKconv:
                d_conv_3 = SKConv(planes, planes, stride=2)
                self.add_module('d_conv_3', d_conv_3)
            else:
                d_conv_3 = nn.Conv2d(planes, planes, kernel_size=3, stride=2, padding=1, bias=False)
                init.kaiming_normal_(d_conv_3.weight)
                self.add_module('d_conv_3', d_conv_3)

            d_conv_1e = nn.Conv2d(planes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
            init.kaiming_normal_(d_conv_1e.weight)
            self.add_module('d_conv_1e', d_conv_1e)

    def forward(self, x):
        # feed-forward
        x_bn = self.bn_f(x)
        x_act = self.relu_f(x_bn)
        x_s = self.conv_f(x_act)

        x_g_bn = self.bn_g_f(x)
        x_g_act = self.relu_g_f(x_g_bn)
        x_g_s = self.conv_g_f(x_g_act)

        # recurrent: gated residual updates of the state x_s
        for i in range(0, self.iter):
            x_g_r = self.conv_g_r(getattr(self, 'iter_g_%d' % (i + 1))(x_s))
            x_s = getattr(self, 'iter_%d' % (i + 1))(x_s) * torch.sigmoid(x_g_r + x_g_s) + x_s

        if self.downsample:
            x_s_1 = self.d_conv(self.d_ave(self.d_relu(self.d_bn(x_s))))
            x_s_2 = self.d_conv_1e(
                self.d_conv_3(self.d_relu_3(self.d_bn_3(self.d_conv_1(self.d_relu_1(self.d_bn_1(x_s)))))))
            x_s = x_s_1 + x_s_2

        return x_s

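# Illustrative sketch (not part of the original file): a downsampling GRCL stage halves the
# spatial size and outputs planes * expansion channels. Sizes are small placeholders and
# SKconv=False keeps the sketch light.
def _demo_grcl():
    stage = GRCL(inplanes=64, planes=32, downsample=True, iter=1, SKconv=False, expansion=2)
    out = stage(torch.rand(1, 64, 56, 56))
    assert out.shape == (1, 64, 28, 28)
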
class GRCNNBackEnd(nn.Module):
    def __init__(self, iters, maps, SKconv, expansion, num_classes):
        """ Args:
            iters: number of recurrent iterations per GRCL stage.
            maps: number of feature maps per stage.
            SKconv: whether to use selective-kernel convolutions.
            expansion: channel expansion factor of each GRCL stage.
            num_classes: number of output classes.
        """
        super(GRCNNBackEnd, self).__init__()
        self.iters = iters
        self.maps = maps
        self.num_classes = num_classes
        self.expansion = expansion

        self.layer1 = GRCL(64, self.maps[0], True, self.iters[0], SKconv, self.expansion)
        self.layer2 = GRCL(self.maps[0] * self.expansion, self.maps[1], True, self.iters[1], SKconv, self.expansion)
        self.layer3 = GRCL(self.maps[1] * self.expansion, self.maps[2], True, self.iters[2], SKconv, self.expansion)
        self.layer4 = GRCL(self.maps[2] * self.expansion, self.maps[3], False, self.iters[3], SKconv, self.expansion)

        self.lastact = nn.Sequential(nn.BatchNorm2d(self.maps[3] * self.expansion), nn.ReLU(inplace=True))
        self.avgpool = nn.AvgPool2d(7)
        self.classifier = nn.Linear(self.maps[3] * self.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                init.ones_(m.weight)
                init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight)
                init.zeros_(m.bias)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.lastact(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)

def grcnn55BackEnd(num_classes=1000):
    """
    Args:
        num_classes (int): number of classes
    """
    model = GRCNNBackEnd([3, 3, 4, 3], [64, 128, 256, 512], SKconv=False, expansion=4, num_classes=num_classes)
    return model

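# Illustrative sketch (not part of the original file): the backend consumes the 64-channel
# bottleneck output (56x56 for a 224-pixel input) and returns class logits.
def _demo_grcnn_backend():
    backend = grcnn55BackEnd(num_classes=10)
    logits = backend(torch.rand(1, 64, 56, 56))
    assert logits.shape == (1, 10)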
@@ -0,0 +1,122 @@
from brainscore_vision.model_helpers.check_submission import check_models
import torch
import functools
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
from brainscore_vision.model_helpers.s3 import load_weight_file
from .helpers.vongrcnn_helpers import VOneNet, grcnn55BackEnd
import torch.nn as nn
from collections import OrderedDict

device = "cpu"
model_identifier = 'vonegrcnn_62e_nobn'


### DEFINE YOUR CUSTOM MODEL HERE

# get_model actually gets the model. For a custom model, this is just linked to the
# model we defined above.
def get_model(name):
    """
    This method fetches an instance of a base model. The instance has to be callable and return an xarray object
    containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
    keras. Check out the examples folder to see more. For custom implementations, check out the implementation of the
    wrappers.
    :param name: the name of the model to fetch
    :return: the model instance
    """
    assert name == 'vonegrcnn_62e_nobn'
    # link the custom model to the wrapper object (activations_model below):
    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    vone_block, bottleneck = VOneNet()
    model_back_end = grcnn55BackEnd()

    model = nn.Sequential(OrderedDict([
        ('vone_block', vone_block),
        ('bottleneck', bottleneck),
        ('model', model_back_end),
    ]))

    model = nn.Sequential(OrderedDict([('module', model)]))
    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                    relative_path="vonegrcnn_62e/model_best.pth",
                                    version_id="null",
                                    sha1="66f5319888ebd146565fb45144afa92d8a2bef3b")
    checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['state_dict'], strict=True)
    model = model.to(device)

    # get an activations model from the PytorchWrapper
    activations_model = PytorchWrapper(identifier=model_identifier, model=model,
                                       preprocessing=preprocessing)
    wrapper = activations_model
    wrapper.image_size = 224
    return wrapper


# get_layers tells the code which layers to consider. If you are submitting a custom
# model, you will most likely need to change this method's return values.
def get_layers(name):
    """
    This method returns a list of string layer names to consider per model. The benchmarks map brain regions to
    layers and use this list as the set of candidate layers. The list doesn't have to contain all layers; the fewer
    layers, the faster the benchmark process runs. Additionally, the given layers have to produce an activations
    vector of at least size 25. The layer names are passed back to the model instance and have to be resolved there.
    For a pytorch model, layer names are, for instance, dot-concatenated per module, e.g. "features.2".
    :param name: the name of the model to return the layers for
    :return: a list of strings containing all layers that should be considered as brain areas.
    """

    # quick check to make sure the model is the correct one:
    assert name == 'vonegrcnn_62e_nobn'
    all_layers = ['module',
                  'module.vone_block',
                  'module.vone_block.simple_conv_q0',
                  'module.vone_block.simple_conv_q1',
                  'module.vone_block.simple',
                  'module.vone_block.complex',
                  'module.vone_block.gabors',
                  'module.vone_block.noise',
                  'module.vone_block.output',
                  'module.bottleneck',
                  'module.model',
                  'module.model.layer1',
                  'module.model.layer2',
                  'module.model.layer3',
                  'module.model.layer4',
                  'module.model.layer1.conv_f',
                  'module.model.layer2.conv_f',
                  'module.model.layer3.conv_f',
                  'module.model.layer4.conv_f',
                  'module.model.layer1.d_conv_1e',
                  'module.model.layer2.d_conv_1e',
                  'module.model.layer3.d_conv_1e',
                  'module.model.layer1.iter_g_3.1',
                  'module.model.layer2.iter_g_3.1',
                  'module.model.layer3.iter_g_4.1',
                  'module.model.layer4.iter_g_3.1',
                  'module.model.lastact',
                  'module.model.lastact.0',
                  'module.model.lastact.1',
                  'module.model.avgpool',
                  'module.model.classifier']
    # returns the layers you want to consider
    return all_layers


# Bibtex method. For a custom model submission, you can either put your own Bibtex if your
# model has been published, or leave the empty return value if there is no publication to refer to.
def get_bibtex(model_identifier):
    """
    A method returning the bibtex reference of the requested model as a string.
    """
    # from pytorch.py:
    return ''


# Main method: when submitting a custom model, you should not have to change this.
if __name__ == '__main__':
    # Use this method to ensure the correctness of the BaseModel implementations.
    # It executes a mock run of brain-score benchmarks.
    check_models.check_base_models(__name__)
@@ -0,0 +1,7 @@
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from brainscore_vision import model_registry
from .model import get_layers, get_model


model_registry['voneresnet-50'] = \
    lambda: ModelCommitment(identifier='voneresnet-50', activations_model=get_model('voneresnet-50'), layers=get_layers('voneresnet-50'))
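# Illustrative sketch (not part of the original file): once registered, the commitment can
# be instantiated lazily through the registry. load_model is assumed to be the public
# loader exposed by brainscore_vision; the identifier must match the registry key above.
def _demo_load_registered_model():
    from brainscore_vision import load_model  # assumed public API
    return load_model('voneresnet-50')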