brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,237 @@
|
|
1
|
+
import math
|
2
|
+
from collections import OrderedDict
|
3
|
+
import torch
|
4
|
+
from torch import nn
|
5
|
+
from torchvision import transforms
|
6
|
+
import torch.utils.model_zoo
|
7
|
+
import os
|
8
|
+
from torch.utils.data import Dataset, DataLoader
|
9
|
+
import pandas as pd
|
10
|
+
import numpy as np
|
11
|
+
import torch.nn.functional as F
|
12
|
+
import h5py
|
13
|
+
import random
|
14
|
+
import functools
|
15
|
+
import torchvision.models
|
16
|
+
from brainscore_vision.model_helpers.s3 import load_weight_file
|
17
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
|
18
|
+
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
|
19
|
+
import json
|
20
|
+
|
21
|
+
LAYERS = ['V1', 'V2', 'V4', 'IT', 'decoder.avgpool']
|
22
|
+
|
23
|
+
class Flatten(nn.Module):
|
24
|
+
"""
|
25
|
+
Helper module for flattening input tensor to 1-D for the use in Linear modules
|
26
|
+
"""
|
27
|
+
def forward(self, x):
|
28
|
+
return x.view(x.size(0), -1)
|
29
|
+
|
30
|
+
class Identity(nn.Module):
|
31
|
+
"""
|
32
|
+
Helper module that stores the current tensor. Useful for accessing by name
|
33
|
+
"""
|
34
|
+
def forward(self, x):
|
35
|
+
return x
|
36
|
+
|
37
|
+
class CORblock_S(nn.Module):
|
38
|
+
scale = 4 # scale of the bottleneck convolution channels
|
39
|
+
|
40
|
+
def __init__(self, in_channels, out_channels, times=1):
|
41
|
+
super().__init__()
|
42
|
+
self.times = times
|
43
|
+
|
44
|
+
self.conv_input = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
|
45
|
+
self.skip = nn.Conv2d(out_channels, out_channels,
|
46
|
+
kernel_size=1, stride=2, bias=False)
|
47
|
+
self.norm_skip = nn.BatchNorm2d(out_channels)
|
48
|
+
|
49
|
+
self.conv1 = nn.Conv2d(out_channels, out_channels * self.scale,
|
50
|
+
kernel_size=1, bias=False)
|
51
|
+
self.nonlin1 = nn.ReLU(inplace=True)
|
52
|
+
|
53
|
+
self.conv2 = nn.Conv2d(out_channels * self.scale, out_channels * self.scale,
|
54
|
+
kernel_size=3, stride=2, padding=1, bias=False)
|
55
|
+
self.nonlin2 = nn.ReLU(inplace=True)
|
56
|
+
|
57
|
+
self.conv3 = nn.Conv2d(out_channels * self.scale, out_channels,
|
58
|
+
kernel_size=1, bias=False)
|
59
|
+
self.nonlin3 = nn.ReLU(inplace=True)
|
60
|
+
|
61
|
+
self.output = Identity() # for an easy access to this block's output
|
62
|
+
|
63
|
+
# need BatchNorm for each time step for training to work well
|
64
|
+
for t in range(self.times):
|
65
|
+
setattr(self, f'norm1_{t}', nn.BatchNorm2d(out_channels * self.scale))
|
66
|
+
setattr(self, f'norm2_{t}', nn.BatchNorm2d(out_channels * self.scale))
|
67
|
+
setattr(self, f'norm3_{t}', nn.BatchNorm2d(out_channels))
|
68
|
+
|
69
|
+
def forward(self, inp):
|
70
|
+
x = self.conv_input(inp)
|
71
|
+
for t in range(self.times):
|
72
|
+
if t == 0:
|
73
|
+
skip = self.norm_skip(self.skip(x))
|
74
|
+
self.conv2.stride = (2, 2)
|
75
|
+
else:
|
76
|
+
skip = x
|
77
|
+
self.conv2.stride = (1, 1)
|
78
|
+
|
79
|
+
x = self.conv1(x)
|
80
|
+
x = getattr(self, f'norm1_{t}')(x)
|
81
|
+
x = self.nonlin1(x)
|
82
|
+
|
83
|
+
x = self.conv2(x)
|
84
|
+
x = getattr(self, f'norm2_{t}')(x)
|
85
|
+
x = self.nonlin2(x)
|
86
|
+
|
87
|
+
x = self.conv3(x)
|
88
|
+
x = getattr(self, f'norm3_{t}')(x)
|
89
|
+
|
90
|
+
x += skip
|
91
|
+
x = self.nonlin3(x)
|
92
|
+
output = self.output(x)
|
93
|
+
|
94
|
+
return output
|
95
|
+
|
96
|
+
def CORnet_S():
|
97
|
+
model = nn.Sequential(OrderedDict([
|
98
|
+
('V1', nn.Sequential(OrderedDict([
|
99
|
+
('conv1', nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)),
|
100
|
+
('norm1', nn.BatchNorm2d(64)),
|
101
|
+
('nonlin1', nn.ReLU(inplace=True)),
|
102
|
+
('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
|
103
|
+
('conv2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)),
|
104
|
+
('norm2', nn.BatchNorm2d(64)),
|
105
|
+
('nonlin2', nn.ReLU(inplace=True)),
|
106
|
+
('output', Identity())
|
107
|
+
]))),
|
108
|
+
('V2', CORblock_S(64, 128, times=2)),
|
109
|
+
('V4', CORblock_S(128, 256, times=4)),
|
110
|
+
('IT', CORblock_S(256, 512, times=2)),
|
111
|
+
('decoder', nn.Sequential(OrderedDict([
|
112
|
+
('avgpool', nn.AdaptiveAvgPool2d(1)),
|
113
|
+
('flatten', Flatten()),
|
114
|
+
('linear', nn.Linear(512, 1000)),
|
115
|
+
('output', Identity())
|
116
|
+
])))
|
117
|
+
]))
|
118
|
+
|
119
|
+
# weight initialization
|
120
|
+
for m in model.modules():
|
121
|
+
if isinstance(m, nn.Conv2d):
|
122
|
+
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
|
123
|
+
m.weight.data.normal_(0, math.sqrt(2. / n))
|
124
|
+
elif isinstance(m, nn.BatchNorm2d):
|
125
|
+
m.weight.data.fill_(1)
|
126
|
+
m.bias.data.zero_()
|
127
|
+
|
128
|
+
return model
|
129
|
+
|
130
|
+
+class Encoder(nn.Module):
+    def __init__(self, realnet, n_output):
+        super(Encoder, self).__init__()
+
+        # CORnet
+        self.realnet = realnet
+
+        # fully connected layers
+        self.fc_v1 = nn.Linear(200704, 128)
+        self.fc_v2 = nn.Linear(100352, 128)
+        self.fc_v4 = nn.Linear(50176, 128)
+        self.fc_it = nn.Linear(25088, 128)
+        self.fc = nn.Linear(512, n_output)
+        self.activation = nn.ReLU()
+
+    def forward(self, imgs):
+        # forward pass through CORnet_S
+        outputs = self.realnet(imgs)
+
+        N = len(imgs)
+        v1_outputs = self.realnet.V1(imgs)        # N * 64 * 56 * 56
+        v2_outputs = self.realnet.V2(v1_outputs)  # N * 128 * 28 * 28
+        v4_outputs = self.realnet.V4(v2_outputs)  # N * 256 * 14 * 14
+        it_outputs = self.realnet.IT(v4_outputs)  # N * 512 * 7 * 7
+
+        # flatten and pass through fully connected layers
+        v1_features = self.fc_v1(v1_outputs.view(N, -1))
+        v1_features = self.activation(v1_features)
+
+        v2_features = self.fc_v2(v2_outputs.view(N, -1))
+        v2_features = self.activation(v2_features)
+
+        v4_features = self.fc_v4(v4_outputs.view(N, -1))
+        v4_features = self.activation(v4_features)
+
+        it_features = self.fc_it(it_outputs.view(N, -1))
+        it_features = self.activation(it_features)
+
+        features = torch.cat((v1_features, v2_features, v4_features, it_features), dim=1)
+        features = self.fc(features)
+
+        return outputs, features
+
+
+# Change here: use 'cpu'
+device = 'cpu'
+
+transform = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor(),
+    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+])
+
+# Construct CORnet_S
+realnet = CORnet_S()
+# (Optional) remove DataParallel if not needed for CPU
+# realnet = torch.nn.DataParallel(realnet)
+
+
+def load_config(json_file):
+    # Get the directory containing this script (model.py)
+    base_dir = os.path.dirname(__file__)
+
+    # Construct the path to the JSON file
+    json_path = os.path.join(base_dir, json_file)
+
+    # Read the JSON
+    with open(json_path, "r", encoding="utf-8") as f:
+        data = json.load(f)
+    return data
+
+
+# Build encoder model
+encoder = Encoder(realnet, 340)
+
+
+def model_load_weights(identifier: str):
+    # Download weights (Brain-Score team modification)
+    # Read the version id and sha1 from the JSON file "weights.json"
+    weights_info = load_config("weights.json")
+
+    version_id = weights_info['version_ids'][identifier]
+    sha1 = weights_info['sha1s'][identifier]
+
+    weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                    relative_path=f"ReAlnet/{identifier}_best_model_params.pt",
+                                    version_id=version_id,
+                                    sha1=sha1)
+
+    # Load weights onto CPU and remove "module." from keys
+    weights = torch.load(weights_path, map_location='cpu')
+    new_state_dict = {}
+    for key, val in weights.items():
+        # remove "module." (if it exists) from the key
+        new_key = key.replace("module.", "")
+        new_state_dict[new_key] = val
+
+    encoder.load_state_dict(new_state_dict)
+
+    # Retrieve the realnet portion from the encoder
+    realnet = encoder.realnet
+    realnet.eval()
+    return realnet
+
+
+def get_model(identifier: str):
+    model = model_load_weights(identifier)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=identifier, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+# if __name__ == "__main__":
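
The key-renaming loop in `model_load_weights` exists because the weights were saved from an `Encoder` wrapped in `torch.nn.DataParallel`, which prefixes every state-dict key with `module.`. A minimal sketch of the transformation (the key name is illustrative):

    import torch
    state = {'module.realnet.V1.conv1.weight': torch.zeros(64, 3, 7, 7)}
    cleaned = {key.replace("module.", ""): val for key, val in state.items()}
    print(list(cleaned))  # ['realnet.V1.conv1.weight'] -- loadable by the bare Encoder
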
File without changes
@@ -0,0 +1,26 @@
+{
+    "version_ids": {
+        "ReAlnet01": "75oY3CnI17U5S1f_yrZxl1XGhRfJEG9N",
+        "ReAlnet02": "TfGdm1CphJJ1vvkJGcm3n266PHvTuOaV",
+        "ReAlnet03": "dmohrH_AHZzgL_o8Xd2SDp6XCnjPOdAu",
+        "ReAlnet04": "45qJFXHihmIHdpHbjKWZco6STH1eh49p",
+        "ReAlnet05": "nqvoYgiBTyWSskjnpF9YOK4yYQfOnc_H",
+        "ReAlnet06": "6.cloFvnMihiicwQ0jkag8reEe4bVlxZ",
+        "ReAlnet07": "WKJaiN4b1ttpbGYNn8yVjng4LjCqWdk.",
+        "ReAlnet08": "vmouew6ePkPnKP.We8VnVxU7TifuhL.x",
+        "ReAlnet09": "53gqQ2tgS.5MEoncipy9mrBEqCc5izw5",
+        "ReAlnet10": "ZZFMhTm9KQYEXl8OGwKmnTr0S.pxkU0J"
+    },
+    "sha1s": {
+        "ReAlnet01": "05e4e401e8734b97e561aad306fc584b7e027225",
+        "ReAlnet02": "e85769fadb3c09ff88a7d73b01451b6bcccefd77",
+        "ReAlnet03": "f32d01d73380374ae501a1504e9c8cd219e9f0bf",
+        "ReAlnet04": "8062373fd6a74c52360420619235590d3688b4df",
+        "ReAlnet05": "88ca110f6b6d225b7b4e7dca02d2e7a906f5a8ed",
+        "ReAlnet06": "a1658c15a3c9d61262f87349c9fb7aa63854ac5b",
+        "ReAlnet07": "6a1c260839c75f6e6c018e06830562cdcda877e5",
+        "ReAlnet08": "1772211b27dd3a7d9255ac59d5f9b7e7cb6c3314",
+        "ReAlnet09": "159d96f0433a87c7063259dac4527325a3c7b79a",
+        "ReAlnet10": "dbdeaee9280267613ebce92dd5d515d89b544352"
+    }
+}
@@ -0,0 +1,46 @@
+from brainscore_vision import model_registry
+from .helpers.helpers import CORnetCommitment, _build_time_mappings
+from .model import get_model, TIME_MAPPINGS, get_layers
+
+
+model_registry['ReAlnet01_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet01_cornet', activations_model=get_model('ReAlnet01_cornet'),
+                                                              layers=get_layers('ReAlnet01_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet02_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet02_cornet', activations_model=get_model('ReAlnet02_cornet'),
+                                                              layers=get_layers('ReAlnet02_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet03_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet03_cornet', activations_model=get_model('ReAlnet03_cornet'),
+                                                              layers=get_layers('ReAlnet03_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet04_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet04_cornet', activations_model=get_model('ReAlnet04_cornet'),
+                                                              layers=get_layers('ReAlnet04_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet05_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet05_cornet', activations_model=get_model('ReAlnet05_cornet'),
+                                                              layers=get_layers('ReAlnet05_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet06_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet06_cornet', activations_model=get_model('ReAlnet06_cornet'),
+                                                              layers=get_layers('ReAlnet06_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet07_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet07_cornet', activations_model=get_model('ReAlnet07_cornet'),
+                                                              layers=get_layers('ReAlnet07_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet08_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet08_cornet', activations_model=get_model('ReAlnet08_cornet'),
+                                                              layers=get_layers('ReAlnet08_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet09_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet09_cornet', activations_model=get_model('ReAlnet09_cornet'),
+                                                              layers=get_layers('ReAlnet09_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+model_registry['ReAlnet10_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet10_cornet', activations_model=get_model('ReAlnet10_cornet'),
+                                                              layers=get_layers('ReAlnet10_cornet'),
+                                                              time_mapping=_build_time_mappings(TIME_MAPPINGS))
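
Each registry entry is a zero-argument constructor that Brain-Score resolves lazily by identifier. A usage sketch, assuming this plugin is installed so that `brainscore_vision` can discover it:

    from brainscore_vision import load_model

    model = load_model('ReAlnet01_cornet')  # invokes the lambda registered above
    print(model.identifier)                 # 'ReAlnet01_cornet'
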
@@ -0,0 +1,215 @@
+import re
+from collections import defaultdict
+from typing import Dict, Tuple
+
+import numpy as np
+from tqdm import tqdm
+
+from brainio.assemblies import merge_data_arrays, NeuroidAssembly, walk_coords
+from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.brain_transformation.behavior import BehaviorArbiter, LogitsBehavior, \
+    ProbabilitiesMapping, OddOneOut
+from brainscore_vision.model_interface import BrainModel
+from result_caching import store
+
+
+class TemporalPytorchWrapper(PytorchWrapper):
+    def __init__(self, *args, separate_time=True, **kwargs):
+        self._separate_time = separate_time
+        super(TemporalPytorchWrapper, self).__init__(*args, **kwargs)
+
+    def _build_extractor(self, *args, **kwargs):
+        if self._separate_time:
+            return TemporalExtractor(*args, **kwargs)
+        else:
+            return super(TemporalPytorchWrapper, self)._build_extractor(*args, **kwargs)
+
+    def get_activations(self, images, layer_names):
+        # reset
+        self._layer_counter = defaultdict(lambda: 0)
+        self._layer_hooks = {}
+        return super(TemporalPytorchWrapper, self).get_activations(images=images, layer_names=layer_names)
+
+    def register_hook(self, layer, layer_name, target_dict):
+        layer_name = self._strip_layer_timestep(layer_name)
+        if layer_name in self._layer_hooks:  # add hook only once for multiple timesteps
+            return self._layer_hooks[layer_name]
+
+        def hook_function(_layer, _input, output):
+            target_dict[f"{layer_name}-t{self._layer_counter[layer_name]}"] = PytorchWrapper._tensor_to_numpy(output)
+            self._layer_counter[layer_name] += 1
+
+        hook = layer.register_forward_hook(hook_function)
+        self._layer_hooks[layer_name] = hook
+        return hook
+
+    def get_layer(self, layer_name):
+        layer_name = self._strip_layer_timestep(layer_name)
+        return super(TemporalPytorchWrapper, self).get_layer(layer_name)
+
+    def _strip_layer_timestep(self, layer_name):
+        match = re.search('-t[0-9]+$', layer_name)
+        if match:
+            layer_name = layer_name[:match.start()]
+        return layer_name
+
+
+class CORnetCommitment(BrainModel):
+    """
+    CORnet commitment where only the model interface is implemented and behavioral readouts are attached.
+    Importantly, layer-region commitments do not occur due to the anatomical pre-mapping.
+    Further, due to the temporal component of the model, requested time-bins are matched to the nearest committed
+    time-bin for the model.
+    """
+
+    def __init__(self, identifier, activations_model, layers,
+                 time_mapping: Dict[str, Dict[int, Tuple[int, int]]], behavioral_readout_layer=None,
+                 visual_degrees=8):
+        """
+        :param time_mapping: mapping from region -> {model_timestep -> (time_bin_start, time_bin_end)}
+        """
+        self.layers = layers
+        self.activations_model = activations_model
+        self.time_mapping = time_mapping
+        self.recording_layers = None
+        self.recording_time_bins = None
+        self._identifier = identifier
+
+        logits_behavior = LogitsBehavior(
+            identifier=identifier, activations_model=TemporalIgnore(activations_model))
+        behavioral_readout_layer = behavioral_readout_layer or layers[-1]
+        probabilities_behavior = ProbabilitiesMapping(
+            identifier=identifier, activations_model=TemporalIgnore(activations_model), layer=behavioral_readout_layer)
+        odd_one_out = OddOneOut(identifier=identifier, activations_model=TemporalIgnore(activations_model),
+                                layer=behavioral_readout_layer)
+        self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior,
+                                               BrainModel.Task.probabilities: probabilities_behavior,
+                                               BrainModel.Task.odd_one_out: odd_one_out,
+                                               })
+        self.do_behavior = False
+
+        self._visual_degrees = visual_degrees
+
+    @property
+    def identifier(self):
+        return self._identifier
+
+    def visual_degrees(self) -> int:
+        return self._visual_degrees
+
+    def start_recording(self, recording_target, time_bins):
+        self.recording_layers = [layer for layer in self.layers if layer.startswith(recording_target)]
+        self.recording_time_bins = time_bins
+
+    def start_task(self, task: BrainModel.Task, *args, **kwargs):
+        if task != BrainModel.Task.passive:
+            self.behavior_model.start_task(task, *args, **kwargs)
+            self.do_behavior = True
+
+    def look_at(self, stimuli, number_of_trials: int = 1, require_variance: bool = False):
+        if self.do_behavior:
+            return self.behavior_model.look_at(stimuli,
+                                               number_of_trials=number_of_trials, require_variance=require_variance)
+        else:
+            # cache, since piecing times together is not too fast unfortunately
+            return self.look_at_cached(self.identifier, stimuli.identifier, stimuli,
+                                       number_of_trials=number_of_trials, require_variance=require_variance)
+
+    @store(identifier_ignore=['stimuli', 'number_of_trials', 'require_variance'])
+    def look_at_cached(self, model_identifier, stimuli_identifier, stimuli,
+                       number_of_trials, require_variance):
+        responses = self.activations_model(stimuli, layers=self.recording_layers,
+                                           number_of_trials=number_of_trials, require_variance=require_variance)
+        # map time
+        regions = set(responses['region'].values)
+        if len(regions) > 1:
+            raise NotImplementedError("cannot handle more than one simultaneous region")
+        region = list(regions)[0]
+        time_bins = [self.time_mapping[region][timestep] if timestep in self.time_mapping[region] else (None, None)
+                     for timestep in responses['time_step'].values]
+        responses['time_bin_start'] = 'time_step', [time_bin[0] for time_bin in time_bins]
+        responses['time_bin_end'] = 'time_step', [time_bin[1] for time_bin in time_bins]
+        responses = NeuroidAssembly(responses.rename({'time_step': 'time_bin'}))
+        responses = responses[{'time_bin': [not np.isnan(time_start) for time_start in responses['time_bin_start']]}]
+        # select time
+        time_responses = []
+        for time_bin in tqdm(self.recording_time_bins, desc='CORnet-time to recording time'):
+            time_bin = time_bin if not isinstance(time_bin, np.ndarray) else time_bin.tolist()
+            time_bin_start, time_bin_end = time_bin
+            nearest_start = find_nearest(responses['time_bin_start'].values, time_bin_start)
+            bin_responses = responses.sel(time_bin_start=nearest_start)
+            bin_responses = NeuroidAssembly(bin_responses.values, coords={
+                **{coord: (dims, values) for coord, dims, values in walk_coords(bin_responses)
+                   if coord not in ['time_bin_level_0', 'time_bin_end']},
+                **{'time_bin_start': ('time_bin', [time_bin_start]),
+                   'time_bin_end': ('time_bin', [time_bin_end])}
+            }, dims=bin_responses.dims)
+            time_responses.append(bin_responses)
+        responses = merge_data_arrays(time_responses)
+        return responses
+
+
+def find_nearest(array, value):
+    array = np.asarray(array)
+    idx = (np.abs(array - value)).argmin()
+    return array[idx]
+
+
+class TemporalIgnore:
+    """
+    Wrapper around an activations model that squeezes out the temporal axis.
+    Useful when there is only one time step and the behavioral readout does not know what to do with time.
+    """
+
+    def __init__(self, temporal_activations_model):
+        self._activations_model = temporal_activations_model
+
+    def __call__(self, *args, **kwargs):
+        activations = self._activations_model(*args, **kwargs)
+        activations = activations.squeeze('time_step')
+        return activations
+
+
+class TemporalExtractor(ActivationsExtractorHelper):
+    # `from_paths` is the earliest method at which we can interject because calls below are stored and checked for the
+    # presence of all layers which, for CORnet, are passed as e.g. `IT.output-t0`.
+    # This code re-arranges the time component.
+    def from_paths(self, *args, **kwargs):
+        raw_activations = super(TemporalExtractor, self).from_paths(*args, **kwargs)
+        # introduce time dimension
+        regions = defaultdict(list)
+        for layer in set(raw_activations['layer'].values):
+            match = re.match(r'(([^-]*)\..*|logits|avgpool)-t([0-9]+)', layer)
+            region, timestep = match.group(2) if match.group(2) else match.group(1), match.group(3)
+            stripped_layer = match.group(1)
+            regions[region].append((layer, stripped_layer, timestep))
+        activations = {}
+        for region, time_layers in regions.items():
+            for (full_layer, stripped_layer, timestep) in time_layers:
+                region_time_activations = raw_activations.sel(layer=full_layer)
+                region_time_activations['layer'] = 'neuroid', [stripped_layer] * len(region_time_activations['neuroid'])
+                activations[(region, timestep)] = region_time_activations
+        for key, key_activations in activations.items():
+            region, timestep = key
+            key_activations['region'] = 'neuroid', [region] * len(key_activations['neuroid'])
+            activations[key] = NeuroidAssembly([key_activations.values], coords={
+                **{coord: (dims, values) for coord, dims, values in walk_coords(activations[key])
+                   if coord != 'neuroid_id'},  # otherwise, neuroid dim will be as large as before with nans
+                **{'time_step': [int(timestep)]}
+            }, dims=['time_step'] + list(key_activations.dims))
+        activations = list(activations.values())
+        activations = merge_data_arrays(activations)
+        # rebuild neuroid_id without timestep
+        neuroid_id = [".".join([f"{value}" for value in values]) for values in zip(*[
+            activations[coord].values for coord in ['model', 'region', 'neuroid_num']])]
+        activations['neuroid_id'] = 'neuroid', neuroid_id
+        return activations
+
+
+def _build_time_mappings(time_mappings):
+    return {region: {
+        timestep: (time_start + timestep * time_step_size,
+                   time_start + (timestep + 1) * time_step_size)
+        for timestep in range(0, timesteps)}
+        for region, (time_start, time_step_size, timesteps) in time_mappings.items()}
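
`_build_time_mappings` expands each `(time_start, time_step_size, timesteps)` triple into contiguous time bins, one per model timestep. A worked example using the 'V4' entry from this plugin's model.py:

    mapping = _build_time_mappings({'V4': (90, 50, 4)})
    # four contiguous 50 ms bins starting at 90 ms:
    assert mapping == {'V4': {0: (90, 140), 1: (140, 190), 2: (190, 240), 3: (240, 290)}}
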
@@ -0,0 +1,69 @@
+import functools
+import importlib
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.check_submission import check_models
+import torch.hub
+import ssl
+from brainscore_vision.model_helpers.s3 import load_weight_file
+from torch.nn import Module
+from .helpers.helpers import TemporalPytorchWrapper
+from pathlib import Path
+from urllib.request import urlretrieve
+
+ssl._create_default_https_context = ssl._create_unverified_context
+
+
+TIME_MAPPINGS = {
+    'V1': (50, 100, 1),
+    'V2': (70, 100, 2),
+    # 'V2': (20, 50, 2),  # MS: This follows from the Movshon anesthetized-monkey recordings, so might not hold up
+    'V4': (90, 50, 4),
+    'IT': (100, 100, 2),
+}
+
+
+def get_model(identifier: str):
+
+    class Wrapper(Module):
+        def __init__(self, model):
+            super(Wrapper, self).__init__()
+            self.module = model
+
+    mod = importlib.import_module('cornet.cornet_s')
+    model_ctr = getattr(mod, 'CORnet_S')
+    model = model_ctr()
+    model = Wrapper(model)  # model was wrapped with DataParallel, so weights require `module.` prefix
+
+    # cornet version: shorten identifier (e.g. 'ReAlnet01_cornet' -> 'ReAlnet01')
+    identifier_short = identifier[:9]
+    url = f'https://brainscore-storage.s3.us-east-2.amazonaws.com/brainscore-vision/models/ReAlnet/{identifier_short}_best_model_params.pt'
+    fh = urlretrieve(url, f'{identifier_short}_best_model_params.pth')
+    load_path = fh[0]
+    checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)  # map onto cpu
+    new_state_dict = {}
+    for key, val in checkpoint.items():
+        # remove the "realnet." prefix (if it exists) from the key
+        new_key = key.replace("realnet.", "")
+        # discard the keys starting with "fc"
+        if not new_key.startswith('fc'):
+            new_state_dict[new_key] = val
+
+    model.load_state_dict(new_state_dict)
+    model = model.module  # unwrap
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = TemporalPytorchWrapper(identifier=identifier, model=model, preprocessing=preprocessing,
+                                     separate_time=True)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(identifier: str):
+    return (['V1.output-t0'] +
+            [f'{area}.output-t{timestep}'
+             for area, timesteps in [('V2', range(2)), ('V4', range(4)), ('IT', range(2))]
+             for timestep in timesteps] +
+            ['decoder.avgpool-t0']
+            )
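
`get_layers` ignores its `identifier` argument and always enumerates one layer name per recurrent timestep, using the `-t<step>` suffix that `TemporalPytorchWrapper._strip_layer_timestep` parses. The returned list expands to:

    ['V1.output-t0',
     'V2.output-t0', 'V2.output-t1',
     'V4.output-t0', 'V4.output-t1', 'V4.output-t2', 'V4.output-t3',
     'IT.output-t0', 'IT.output-t1',
     'decoder.avgpool-t0']
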
File without changes
@@ -0,0 +1,5 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry['Res2Net50_26w_4s'] = lambda: ModelCommitment(identifier='Res2Net50_26w_4s', activations_model=get_model('Res2Net50_26w_4s'), layers=get_layers('Res2Net50_26w_4s'))