brainscore-vision 2.2.4__py3-none-any.whl → 2.2.6__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,326 @@
+import torch
+import numpy as np
+import scipy.stats as stats
+from .utils import sample_dist
+from typing import Literal
+
+image_size = 224
+visual_degrees = 7
+kernel_size = {'p': 21, 'm': 65}  # 95% of Gaussian coverage
+
+
+# Receptive fields of P and M ganglion cells across the primate retina (Kroner and Kaplan, 1994)
+# https://www.sciencedirect.com/science/article/pii/0042698994E0066T
+
+# P Cells (eccentricity range of 0-5)
+P_cell_params = {
+    'med_rc': 0.03, 'iqr_rc': 0.01,    # Center radius
+    'med_kc': 325.2, 'iqr_kc': 302,    # Center peak sensitivity
+    'med_rs': 0.18, 'iqr_rs': 0.07,    # Surround radius
+    'med_ks': 4.4, 'iqr_ks': 4.6,      # Surround peak sensitivity
+    'c_kc': 0.391, 'm_kc': -1.850,     # Center peak sensitivity vs. radius regression
+    'c_ks': 0.128, 'm_ks': -2.147,     # Surround peak sensitivity vs. radius regression
+    }
+
+# M Cells (eccentricity range of 0-10)
+M_cell_params = {
+    'med_rc': 0.10, 'iqr_rc': 0.02,    # Center radius
+    'med_kc': 148.0, 'iqr_kc': 122.4,  # Center peak sensitivity
+    'med_rs': 0.72, 'iqr_rs': 0.23,    # Surround radius
+    'med_ks': 1.1, 'iqr_ks': 0.8,      # Surround peak sensitivity
+    }
+
+
+def get_dog_params(
+        features:int, sampling:Literal['median', 'binning', 'uniform', 'lognormal']='median',
+        colors:list[Literal['r/g', 'g/r', 'b/y', 'w/b']]=['r/g', 'g/r', 'b/y'],
+        polarity:list[Literal[0, 1]]=None,
+        cell_type:Literal['p', 'm']='p', image_size:int=image_size, visual_degrees:int=visual_degrees
+        ) -> dict:
+    """Generates DoG parameters for RetinaBlock with more than 3 channels.
+    Number of channels = number of features * 3 color options (R/G, G/R, B/Y).
+    Only generates ON-center cells.
+
+    Args:
+        features (int): _description_
+        binning (bool, optional): whether to use discrete binning while sampling values. Defaults to True.
+        image_size (int, optional): model image size. Defaults to image_size.
+        visual_degrees (int, optional): visual degrees of the model FoV. Defaults to visual_degrees.
+
+    Returns:
+        dict: dictionary with center and surround radii, opponency tensor and DoG kernel size
+    """
+
+    if not features:
+        return {
+            f'rc_{cell_type}_cell': torch.tensor([]),
+            f'rs_{cell_type}_cell': torch.tensor([]),
+            f'opponency_{cell_type}_cell': torch.tensor([]),
+            f'kernel_{cell_type}_cell': torch.tensor([])
+            }
+
+    assert cell_type in ['p', 'm']
+
+    cell_params = M_cell_params if cell_type=='m' else P_cell_params
+    min_rc = cell_params['med_rc'] - cell_params['iqr_rc']
+    max_rc = cell_params['med_rc'] + cell_params['iqr_rc']
+    min_rs = cell_params['med_rs'] - cell_params['iqr_rs']
+    max_rs = cell_params['med_rs'] + cell_params['iqr_rs']
+
+    color_mapping = {
+        'r/g': np.array([[1,0,0],[0,-1,0]], dtype=np.float16),     # R+/G- (center/surround)
+        'g/r': np.array([[0,1,0],[-1,0,0]], dtype=np.float16),     # G+/R-
+        'b/y': np.array([[0,0,1],[-.5,-.5,0]], dtype=np.float16),  # B+/Y-
+        'w/b': np.array([[1/3]*3,[-1/3]*3], dtype=np.float16)      # ON/OFF
+        }
+
+    assert features % len(colors) == 0
+
+    if sampling=='median':
+        # Use median values from distributions (deterministic)
+        assert features == len(colors)
+        rc = np.ones((features,), dtype=np.float16) * cell_params['med_rc']
+        rs = np.ones((features,), dtype=np.float16) * cell_params['med_rs']
+        kc = np.ones((features,), dtype=np.float16) * cell_params['med_kc']
+        ks = np.ones((features,), dtype=np.float16) * cell_params['med_ks']
+    elif sampling=='binning':
+        # Assume uniform joint distribution of rc and rs with discrete binning (deterministic)
+        assert int(np.sqrt(features//len(colors)))==np.sqrt(features//len(colors))
+        edges_rc = np.linspace(min_rc, max_rc, int(np.sqrt(features // len(colors))) + 1)
+        edges_rs = np.linspace(min_rs, max_rs, int(np.sqrt(features // len(colors))) + 1)
+        centers_rc = (edges_rc[:-1] + edges_rc[1:]) / 2
+        centers_rs = (edges_rs[:-1] + edges_rs[1:]) / 2
+        rc = np.repeat(centers_rc, int(np.sqrt(features // len(colors))))
+        rs = np.tile(centers_rs, int(np.sqrt(features // len(colors))))
+    elif sampling=='uniform':
+        # Assume uniform disjoint distribution of rc and rs without binning (stochastic)
+        rc = np.random.uniform(min_rc, max_rc, features // len(colors))
+        rs = np.random.uniform(min_rs, max_rs, features // len(colors))
+    elif sampling=='lognormal':
+        # Assume lognormal disjoint distribution of rc and rs (stochastic)
+        std_rc = (np.log(cell_params['med_rc'] - (cell_params['iqr_rc']/2)) - np.log(cell_params['med_rc'])) / stats.norm.ppf(.25)
+        std_rs = (np.log(cell_params['med_rs'] - (cell_params['iqr_rs']/2)) - np.log(cell_params['med_rs'])) / stats.norm.ppf(.25)
+        rc = np.random.lognormal(np.log(cell_params['med_rc']), std_rc, features // len(colors))
+        rs = np.random.lognormal(np.log(cell_params['med_rs']), std_rs, features // len(colors))
+
+    if sampling != 'median':
+        assert cell_type == 'p'
+        rc = np.tile(rc, len(colors))
+        rs = np.tile(rs, len(colors))
+        kc = cell_params['c_kc'] * rc ** cell_params['m_kc']
+        ks = cell_params['c_ks'] * rs ** cell_params['m_ks']
+
+    opponency = np.concatenate([
+        np.repeat(color_mapping[c][None, ...], features // len(colors), axis=0)
+        for c in colors
+        ])
+
+    # Conversions
+    ppd = image_size / visual_degrees  # pixels per FOV degree
+    rc = torch.from_numpy(rc * ppd)
+    rs = torch.from_numpy(rs * ppd)
+    kc = torch.from_numpy(kc / ppd ** 2)
+    ks = torch.from_numpy(ks / ppd ** 2)
+    opponency = torch.from_numpy(opponency)
+
+    opponency[:,1] *= torch.unsqueeze(ks[:]/kc[:], 1)
+
+    if polarity:
+        assert len(polarity) == opponency.size(0)
+        opponency *= torch.tensor(polarity)[..., None, None]
+
+    params = {
+        f'rc_{cell_type}_cell': rc,
+        f'rs_{cell_type}_cell': rs,
+        f'opponency_{cell_type}_cell': opponency,
+        f'kernel_{cell_type}_cell': kernel_size[cell_type]
+        }
+
+    return params
+
+def get_div_norm_params(
+        relative_size_la, kernel_la=None,
+        image_size=image_size, visual_degrees=visual_degrees
+        ) -> dict:
+
+    # Conversions
+    ppd = image_size / visual_degrees  # pixels per FOV degree
+    radius_la = P_cell_params['med_rs'] * relative_size_la * ppd
+    radius_cn = 2 * P_cell_params['med_rc'] * ppd
+    c50 = .3
+
+    if not kernel_la and radius_la < np.inf:
+        kernel_la = int(radius_la*2) + int(int(radius_la*2)%2==0)
+
+    params = {
+        'kernel_la': kernel_la,
+        'radius_la': radius_la,
+        'kernel_cn': kernel_size['p'],
+        'radius_cn': radius_cn,
+        'c50': c50
+        }
+
+    return params
+
+
+def get_grating_params(
+        sf, angle=0, phase=0, contrast=1, radius=.5,
+        image_size=image_size, visual_degrees=visual_degrees
+        ) -> dict:
+    ppd = image_size / visual_degrees  # pixels per FOV degree
+    params = {
+        'size': image_size,
+        'radius': radius * ppd,
+        'sf': sf/ppd,
+        'theta': angle,
+        'phase': phase,
+        'contrast': contrast
+        }
+    return params
+
+
+def generate_gabor_param(
+        n_sc, n_cc, seed=0, rand_flag=False, sf_corr=0.75,
+        sf_max=11.5, sf_min=0, diff_n=False, dnstd=0.22,
+        # Additional parameters
+        in_channels=3, set_orientation=None
+        ):
+
+    features = n_sc + n_cc
+
+    # Generates random sample
+    np.random.seed(seed)
+
+    phase_bins = np.array([0, 360])
+    phase_dist = np.array([1])
+
+    if rand_flag:
+        print('Uniform gabor parameters')
+        ori_bins = np.array([0, 180])
+        ori_dist = np.array([1])
+
+        nx_bins = np.array([0.1, 10**0])
+        nx_dist = np.array([1])
+
+        ny_bins = np.array([0.1, 10**0])
+        ny_dist = np.array([1])
+
+        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8, 11.2])
+        sf_s_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+        sf_c_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+
+    else:
+        print('Neuronal distributions gabor parameters')
+        # DeValois 1982a
+        ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
+        ori_dist = np.array([66, 49, 77, 54])
|
217
|
+
ori_dist = ori_dist / ori_dist.sum()
|
218
|
+
|
219
|
+
# Ringach 2002b
|
220
|
+
nx_bins = np.logspace(-1, 0., 5, base=10)
|
221
|
+
ny_bins = np.logspace(-1, 0., 5, base=10)
|
222
|
+
n_joint_dist = np.array([[2., 0., 1., 0.],
|
223
|
+
[8., 9., 4., 1.],
|
224
|
+
[1., 2., 19., 17.],
|
225
|
+
[0., 0., 1., 7.]])
|
226
|
+
n_joint_dist = n_joint_dist / n_joint_dist.sum()
|
227
|
+
nx_dist = n_joint_dist.sum(axis=1)
|
228
|
+
nx_dist = nx_dist / nx_dist.sum()
|
229
|
+
ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)
|
230
|
+
|
231
|
+
# DeValois 1982b
|
232
|
+
sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8, 11.2])
|
233
|
+
# foveal only
|
234
|
+
sf_s_dist = np.array([4, 4, 8, 25, 33, 26, 28, 12, 8])
|
235
|
+
sf_c_dist = np.array([0, 0, 9, 9, 7, 10, 23, 12, 14])
|
236
|
+
|
237
|
+
phase = sample_dist(phase_dist, phase_bins, features)
|
238
|
+
|
239
|
+
if set_orientation or set_orientation == 0:
|
240
|
+
ori = np.ones((features,)) * set_orientation
|
241
|
+
else:
|
242
|
+
ori = sample_dist(ori_dist, ori_bins, features)
|
243
|
+
|
244
|
+
sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
|
245
|
+
sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
|
246
|
+
|
247
|
+
sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
|
248
|
+
sf_s_dist = sf_s_dist[sfmin_ind:sfmax_ind]
|
249
|
+
sf_c_dist = sf_c_dist[sfmin_ind:sfmax_ind]
|
250
|
+
|
251
|
+
sf_s_dist = sf_s_dist / sf_s_dist.sum()
|
252
|
+
sf_c_dist = sf_c_dist / sf_c_dist.sum()
|
253
|
+
|
254
|
+
cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])
|
255
|
+
|
256
|
+
if rand_flag: # Uniform
|
257
|
+
samps = np.random.multivariate_normal([0, 0], cov_mat, features)
|
258
|
+
samps_cdf = stats.norm.cdf(samps)
|
259
|
+
|
260
|
+
nx = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
|
261
|
+
nx = 10**nx
|
262
|
+
|
263
|
+
if diff_n:
|
264
|
+
ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
|
265
|
+
else:
|
266
|
+
ny = 10**(np.random.normal(np.log10(nx), dnstd))
|
267
|
+
ny[ny<0.1] = 0.1
|
268
|
+
ny[ny>1] = 1
|
269
|
+
# ny = nx
|
270
|
+
|
271
|
+
sf = np.interp(samps_cdf[:,1], np.hstack(([0], sf_s_dist.cumsum())), np.log2(sf_bins))
|
272
|
+
sf = 2**sf
|
273
|
+
|
274
|
+
else: # Biological
|
275
|
+
|
276
|
+
if n_sc > 0:
|
277
|
+
samps = np.random.multivariate_normal([0, 0], cov_mat, n_sc)
|
278
|
+
samps_cdf = stats.norm.cdf(samps)
|
279
|
+
|
280
|
+
nx_s = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
|
281
|
+
nx_s = 10**nx_s
|
282
|
+
|
283
|
+
ny_samp = np.random.rand(n_sc)
|
284
|
+
ny_s = np.zeros(n_sc)
|
285
|
+
for samp_ind, nx_samp in enumerate(nx_s):
|
286
|
+
bin_id = np.argwhere(nx_bins < nx_samp)[-1]
|
287
|
+
ny_s[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
|
288
|
+
np.log10(ny_bins))
|
289
|
+
ny_s = 10**ny_s
|
290
|
+
|
291
|
+
sf_s = np.interp(samps_cdf[:,1], np.hstack(([0], sf_s_dist.cumsum())), np.log2(sf_bins))
|
292
|
+
sf_s = 2**sf_s
|
293
|
+
else:
|
294
|
+
nx_s = np.array([])
|
295
|
+
ny_s = np.array([])
|
296
|
+
sf_s = np.array([])
|
297
|
+
|
298
|
+
if n_cc > 0:
|
299
|
+
samps = np.random.multivariate_normal([0, 0], cov_mat, n_cc)
|
300
|
+
samps_cdf = stats.norm.cdf(samps)
|
301
|
+
|
302
|
+
nx_c = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
|
303
|
+
nx_c = 10**nx_c
|
304
|
+
|
305
|
+
ny_samp = np.random.rand(n_cc)
|
306
|
+
ny_c = np.zeros(n_cc)
|
307
|
+
for samp_ind, nx_samp in enumerate(nx_c):
|
308
|
+
bin_id = np.argwhere(nx_bins < nx_samp)[-1]
|
309
|
+
ny_c[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
|
310
|
+
np.log10(ny_bins))
|
311
|
+
ny_c = 10**ny_c
|
312
|
+
|
313
|
+
sf_c = np.interp(samps_cdf[:,1], np.hstack(([0], sf_c_dist.cumsum())), np.log2(sf_bins))
|
314
|
+
sf_c = 2**sf_c
|
315
|
+
else:
|
316
|
+
nx_c = np.array([])
|
317
|
+
ny_c = np.array([])
|
318
|
+
sf_c = np.array([])
|
319
|
+
|
320
|
+
nx = np.concatenate((nx_s, nx_c))
|
321
|
+
ny = np.concatenate((ny_s, ny_c))
|
322
|
+
sf = np.concatenate((sf_s, sf_c))
|
323
|
+
|
324
|
+
color = np.random.randint(low=0, high=in_channels, size=features, dtype=np.int8)
|
325
|
+
|
326
|
+
return sf, ori, phase, nx, ny, color
|
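For orientation only, a minimal sketch of how the Gabor-parameter sampler above might be called, assuming the function (and its scipy/numpy dependencies) is in scope; the cell counts and seed are illustrative, not values taken from this package.

    # Illustrative call (hypothetical values): sample parameters for 128 simple and
    # 128 complex cells from the neuronal distributions (rand_flag=False).
    sf, ori, phase, nx, ny, color = generate_gabor_param(
        n_sc=128, n_cc=128, seed=0, rand_flag=False, sf_corr=0.75,
        sf_max=11.5, sf_min=0, in_channels=3
    )
    print(sf.shape, ori.shape)  # each is a (256,) array of sampled values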
@@ -0,0 +1,142 @@
+import math
+import torch
+import numpy as np
+import random
+
+def gaussian_kernel(
+        sigma: float, k: float=1, size:float=15, norm:bool=False
+        ) -> torch.tensor:
+    """Returns a 2D Gaussian kernel.
+
+    :param sigma (float): standard deviation of the Gaussian
+    :param k (float, optional): height of the Gaussian
+    :param size (float, optional): kernel size
+    :param norm (bool, optional): whether to normalize the kernel
+    :return: gaussian kernel
+    """
+    assert size % 2 == 1
+    w = size // 2
+    grid_val = torch.arange(-w, w+1, dtype=torch.float)
+    x, y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+    gaussian = k * torch.exp(-(x**2 + y**2) / (2*(sigma)**2))
+    if norm: gaussian /= torch.abs(gaussian.sum())
+    return gaussian
+
+
+def dog_kernel(
+        sigma_c: float, sigma_s: float, k_c: float, k_s: float,
+        polarity:int, size:int=21
+        ) -> torch.tensor:
+    """Returns a 2D Difference-of-Gaussians kernel.
+
+    :param sigma_c: standard deviation of the center Gaussian
+    :param sigma_s: standard deviation of the surround Gaussian
+    :param k_c: peak sensitivity of the center
+    :param k_s: peak sensitivity of the surround
+    :param polarity: polarity of the center Gaussian (+1 or -1)
+    :param size: kernel size
+    :return: difference-of-gaussians kernel
+    """
+    assert size % 2 == 1
+    assert polarity in [-1, 1]
+    center_gaussian = gaussian_kernel(sigma=sigma_c, k=k_c, size=size)
+    surround_gaussian = gaussian_kernel(sigma=sigma_s, k=k_s, size=size)
+    dog = polarity * (center_gaussian - surround_gaussian)
+    dog /= torch.sum(dog)
+    return dog
+
+def circular_kernel(size:int, radius:float) -> torch.tensor:
+    """Returns circular kernel.
+
+    :param size (int): kernel size
+    :param radius (float): radius of the circle
+    :return: circular kernel
+    """
+
+    w = size // 2
+    grid_val = torch.arange(-w, w+1, dtype=torch.float)
+    x, y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+    kernel = torch.zeros(y.shape)
+    kernel[torch.sqrt(x**2 + y**2) <= radius] = 1
+    kernel /= torch.sum(kernel)
+    return kernel
+
+def gabor_kernel(
+        frequency:float, sigma_x:float, sigma_y:float,
+        theta:float=0, offset:float=0, ks:int=61
+        ):
+    """Returns gabor kernel.
+
+    :param frequency (float): spatial frequency of gabor
+    :param sigma_x (float): standard deviation in x direction
+    :param sigma_y (float): standard deviation in y direction
+    :param theta (float, optional): angle theta. Defaults to 0.
+    :param offset (float, optional): phase offset. Defaults to 0.
+    :param ks (int, optional): kernel size. Defaults to 61.
+    :return: np.ndarray: 2-dimensional Gabor kernel
+    """
+    w = ks // 2
+    grid_val = torch.arange(-w, w+1, dtype=torch.float)
+    x, y = torch.meshgrid(grid_val, grid_val)
+    rotx = x * np.cos(theta) + y * np.sin(theta)
+    roty = -x * np.sin(theta) + y * np.cos(theta)
+    g = torch.zeros(y.shape)
+    g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
+    g /= 2 * np.pi * sigma_x * sigma_y
+    g *= torch.cos(2 * np.pi * frequency * rotx + offset)
+    return g
+
+def generate_grating(
+        size:int, radius:float, sf:float, theta:float=0, phase:float=0,
+        contrast:float=1, gaussian_mask:bool=False
+        ) -> torch.tensor:
+    """Returns masked grating array.
+
+    :param size (int): kernel size
+    :param radius (float): standard deviation times sqrt(2) of the mask if gaussian_mask is True, the radius of the circular mask otherwise
+    :param sf (float): spatial frequency of the grating
+    :param theta (float, optional): angle of the grating
+    :param phase (float, optional): phase of the grating
+    :param gaussian_mask (bool, optional): mask is a Gaussian if True and a circle if False
+    :param contrast (float, optional): maximum contrast of the grating
+    :return: 2d masked grating array
+    """
+    grid_val = torch.linspace(-size//2, size//2+1, size, dtype=torch.float)
+    X, Y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+    grating = torch.sin(2*math.pi*sf*(X*math.cos(theta) + Y*math.sin(theta)) + phase) * contrast
+    mask = torch.exp(-((X**2 + Y**2)/(2*(radius/np.sqrt(2))**2))) if gaussian_mask else torch.sqrt(X**2 + Y**2) <= radius
+    return grating * mask * .5 + .5
+
+
+def sample_dist(hist:np.array, bins:np.array, ns:int, scale:str='linear'):
+    """Samples from distributions with different scales.
+
+    Args:
+        hist (np.array): normalized histogram (bin probabilities)
+        bins (np.array): bin edges
+        ns (int): number of samples to draw
+        scale (str, optional): distribution scale. Defaults to 'linear'.
+
+    :returns rand_sample (np.array): samples drawn from the histogram
+    """
+    rand_sample = np.random.rand(ns)
+    if scale == 'linear':
+        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins)
+    elif scale == 'log2':
+        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins))
+        rand_sample = 2**rand_sample
+    elif scale == 'log10':
+        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins))
+        rand_sample = 10**rand_sample
+    return rand_sample
+
+def set_seed(seed):
+    """Enforces deterministic behaviour and sets the RNG seed for python random and pytorch.
+
+    :param seed (int): seed
+    """
+    random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    torch.backends.cudnn.deterministic = True
+
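For orientation only, a minimal sketch combining the kernel and grating helpers above, assuming they are in scope; all sizes and sigmas are illustrative rather than package defaults.

    import torch

    # Illustrative call (hypothetical values): an ON-center DoG kernel convolved with
    # a circularly masked grating, both built from the helpers defined above.
    dog = dog_kernel(sigma_c=1.0, sigma_s=3.0, k_c=1.0, k_s=0.5, polarity=1, size=21)
    grating = generate_grating(size=224, radius=40.0, sf=0.05, theta=0.0, contrast=1.0)
    response = torch.nn.functional.conv2d(
        grating[None, None, ...], dog[None, None, ...], padding=dog.shape[-1] // 2
    )
    print(dog.shape, grating.shape, response.shape)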
@@ -0,0 +1,67 @@
+import functools
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.s3 import load_weight_file
+import torch
+from .evnet.evnet import EVNet, evnet_params
+
+def get_model(name):
+    assert name == 'evresnet_50_4'
+    model = EVNet(
+        **evnet_params['evnet'], model_arch='resnet50', image_size=224,
+        gabor_seed=1, visual_degrees=7, num_classes=1000,
+        sf_max=11.5, k_exc=25, stride=4
+    )
+    weight_file = load_weight_file(
+        bucket="evnets-model-weights",
+        relative_path="evresnet_50_1.pth",
+        sha1="ca92ffc54171dd0eee1576b6ff375d6b02623aa7",
+        version_id="LuiUBSKSo8r0qVAi8UuaFCBVoQxszmWw"
+    )
+    model.to(torch.device('cpu'))
+    checkpoint = torch.load(weight_file, map_location=torch.device('cpu'))
+    model.load_state_dict(checkpoint['model'])
+    preprocessing = functools.partial(
+        load_preprocess_images,
+        image_size=224,
+        normalize_mean=(0,0,0),
+        normalize_std=(1,1,1)
+    )
+    wrapper = PytorchWrapper(
+        identifier='evresnet_50_4',
+        model=model, preprocessing=preprocessing
+    )
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'evresnet_50_4'
+    layers = (
+        ['voneblock'] +
+        ['model.0.0', 'model.0.1', 'model.0.2'] +
+        ['model.1.0', 'model.1.1', 'model.1.2', 'model.1.3'] +
+        ['model.2.0', 'model.2.1', 'model.2.2', 'model.2.3',
+         'model.2.4', 'model.2.5'] +
+        ['model.3.0', 'model.3.1', 'model.3.2'] +
+        ['model.5']
+    )
+    return layers
+
+
+def get_bibtex(model_identifier):
+    return """@misc{piper2024explicitlymodelingprecorticalvision,
+        title={Explicitly Modeling Pre-Cortical Vision with a Neuro-Inspired Front-End Improves CNN Robustness},
+        author={Lucas Piper and Arlindo L. Oliveira and Tiago Marques},
+        year={2024},
+        eprint={2409.16838},
+        archivePrefix={arXiv},
+        primaryClass={cs.CV},
+        url={https://arxiv.org/abs/2409.16838}
+}"""
+
+
+if __name__ == '__main__':
+    get_model('evresnet_50_4')
+    check_models.check_base_models(__name__)
@@ -0,0 +1,10 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry['evresnet_50_4_no_mapping'] = lambda: ModelCommitment(
+    identifier='evresnet_50_4_no_mapping',
+    activations_model=get_model('evresnet_50_4_no_mapping'),
+    layers=get_layers('evresnet_50_4_no_mapping'),
+    visual_degrees=7
+)
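A rough usage sketch, assuming the top-level brainscore_vision loader resolves identifiers from model_registry; the call below is illustrative only.

    import brainscore_vision

    # Hypothetical lookup of the commitment registered above by its identifier.
    model = brainscore_vision.load_model('evresnet_50_4_no_mapping')
    print(model.identifier)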
@@ -0,0 +1,109 @@
+import torch
+from torch import nn
+import torchvision
+
+def get_resnet_backend(
+        num_classes:int,
+        p_channels:int,
+        m_channels:int,
+        layers:int=18,
+        with_voneblock:bool=False,
+        tiny:bool=False
+        ):
+    """Returns ResNet18 or ResNet50 backend.
+
+    :param num_classes (int): number of classes in the classifier
+    :param p_channels (int, optional): number of Midget/P cell channels
+    :param m_channels (int, optional): number of Parasol/M cell channels
+    :param layers (int, optional): number of architecture layers
+    :param with_voneblock (bool, optional): whether to remove the first block of the backend
+    :param tiny (bool, optional): whether to employ Tiny ImageNet adaptation (64px/2deg input)
+    :return: backend model, number of backend in channels
+    """
+    assert layers in [18, 50]
+    backend = torchvision.models.resnet18() if layers == 18 else torchvision.models.resnet50()
+    backend.fc = nn.Linear(backend.fc.in_features, num_classes)
+    if with_voneblock:
+        # When using the VOneBlock, in channels are defined in the bottleneck
+        backend_in_channels = backend.layer1[0].conv1.in_channels
+        backend = nn.Sequential(
+            *list(backend.children())[4:-1],  # Remove first block from ResNet-18
+            nn.Flatten(),
+            backend.fc
+        )
+    else:
+        backend_in_channels = 3
+        in_channels = p_channels + m_channels
+        if tiny: backend.conv1.stride = (1, 1)
+        conv1 = nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=backend.conv1.out_channels,
+            kernel_size=backend.conv1.kernel_size,
+            stride=backend.conv1.stride,
+            padding=backend.conv1.padding,
+            bias=backend.conv1.bias
+        )
+        weight = torch.zeros_like(conv1.weight.data)
+        weight[:, :min(in_channels, backend_in_channels), :, :] =\
+            backend.conv1.weight.data[:, :min(in_channels, backend_in_channels), :, :]
+        if weight.size(1) > backend.conv1.weight.data.size(1):
+            nn.init.kaiming_normal_(weight[:, in_channels:, :, :], mode="fan_out", nonlinearity="relu")
+        conv1.weight.data = weight
+        backend.conv1 = conv1
+
+    return backend, backend_in_channels
+
+
+def get_vgg_backend(
+        num_classes:int,
+        p_channels:int,
+        m_channels:int,
+        layers:int=16,
+        with_voneblock:bool=False,
+        tiny:bool=False
+        ):
+    """Returns VGG16 or VGG19 backend.
+
+    :param num_classes (int): number of classes in the classifier
+    :param p_channels (int, optional): number of Midget/P cell channels
+    :param m_channels (int, optional): number of Parasol/M cell channels
+    :param layers (int, optional): number of architecture layers
+    :param with_voneblock (bool, optional): whether to remove the first block of the backend
+    :param tiny (bool, optional): whether to employ Tiny ImageNet adaptation (64px/2deg input)
+    :return: backend model, number of backend in channels
+    """
+    assert layers in [16, 19]
+    backend = torchvision.models.vgg16() if layers == 16 else torchvision.models.vgg19()
+    backend.classifier[-1] = nn.Linear(backend.classifier[-1].in_features, num_classes)
+    if tiny:
+        backend.features = nn.Sequential(*list(backend.features[:-1]))
+        backend.classifier[0] = nn.Linear(in_features=25088, out_features=2048, bias=True)
+        backend.classifier[3] = nn.Linear(in_features=2048, out_features=2048, bias=True)
+        backend.classifier[6] = nn.Linear(in_features=2048, out_features=200, bias=True)
+    if with_voneblock:
+        backend_in_channels = backend.features[2].in_channels
+        backend.features = nn.Sequential(
+            *list(backend.features[2:])
+        )
+    else:
+        backend_in_channels = 3
+        in_channels = p_channels + m_channels
+        backend.features[0] = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=True)
+        conv1 = nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=backend.features[0].out_channels,
+            kernel_size=backend.features[0].kernel_size,
+            stride=backend.features[0].stride,
+            padding=backend.features[0].padding,
+            bias=True
+        )
+        weight = torch.zeros_like(conv1.weight.data)
+        weight[:, :min(in_channels, backend_in_channels), :, :] =\
+            backend.features[0].weight.data[:, :min(in_channels, backend_in_channels), :, :]
+        if weight.size(1) > backend.features[0].weight.data.size(1):
+            nn.init.kaiming_normal_(weight[:, in_channels:, :, :], mode="fan_out", nonlinearity="relu")
+        conv1.bias.data = backend.features[0].bias.data
+        conv1.weight.data = weight
+        backend.features[0] = conv1
+
+    return backend, backend_in_channels
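A brief, illustrative call of the backend builder above; the P/M channel counts are placeholders rather than package defaults.

    # Hypothetical example: build a ResNet-50 backend whose first block is removed so it
    # can sit behind a VOneBlock-style front-end.
    backend, backend_in_channels = get_resnet_backend(
        num_classes=1000, p_channels=32, m_channels=32,
        layers=50, with_voneblock=True
    )
    print(backend_in_channels)  # channels the truncated backend expects from the front-end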