brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl
- brainscore_vision/data/baker2022/__init__.py +10 -10
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
- brainscore_vision/data/barbumayo2019/__init__.py +3 -3
- brainscore_vision/data/bashivankar2019/__init__.py +10 -10
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
- brainscore_vision/data/bmd2024/__init__.py +20 -20
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
- brainscore_vision/data/bracci2019/__init__.py +5 -5
- brainscore_vision/data/bracci2019/data_packaging.py +1 -1
- brainscore_vision/data/cadena2017/__init__.py +5 -5
- brainscore_vision/data/cichy2019/__init__.py +5 -5
- brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
- brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
- brainscore_vision/data/david2004/__init__.py +5 -5
- brainscore_vision/data/deng2009/__init__.py +3 -3
- brainscore_vision/data/ferguson2024/__init__.py +112 -112
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
- brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
- brainscore_vision/data/geirhos2021/__init__.py +85 -85
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
- brainscore_vision/data/hebart2023/__init__.py +5 -5
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
- brainscore_vision/data/hendrycks2019/__init__.py +12 -12
- brainscore_vision/data/igustibagus2024/__init__.py +5 -5
- brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
- brainscore_vision/data/islam2021/__init__.py +3 -3
- brainscore_vision/data/kar2018/__init__.py +7 -7
- brainscore_vision/data/kar2019/__init__.py +5 -5
- brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
- brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
- brainscore_vision/data/majajhong2015/__init__.py +23 -23
- brainscore_vision/data/malania2007/__init__.py +77 -77
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
- brainscore_vision/data/maniquet2024/__init__.py +11 -11
- brainscore_vision/data/marques2020/__init__.py +30 -30
- brainscore_vision/data/rajalingham2018/__init__.py +10 -10
- brainscore_vision/data/rajalingham2020/__init__.py +5 -5
- brainscore_vision/data/rust2012/__init__.py +7 -7
- brainscore_vision/data/sanghavi2020/__init__.py +19 -19
- brainscore_vision/data/scialom2024/__init__.py +110 -110
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
- brainscore_vision/data/seibert2019/__init__.py +2 -2
- brainscore_vision/data/zhang2018/__init__.py +5 -5
- brainscore_vision/data_helpers/s3.py +25 -6
- brainscore_vision/model_helpers/activations/pytorch.py +34 -12
- brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
- brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
- brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
- brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
- brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
- brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
- brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
- brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
- brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
- brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
- brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
- brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
- brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
- brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
- brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
- brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
- brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
- brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
- brainscore_vision/models/ReAlnet/__init__.py +64 -0
- brainscore_vision/models/ReAlnet/model.py +237 -0
- brainscore_vision/models/ReAlnet/requirements.txt +7 -0
- brainscore_vision/models/ReAlnet/test.py +0 -0
- brainscore_vision/models/ReAlnet/weights.json +26 -0
- brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
- brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
- brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
- brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
- brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
- brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
- brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
- brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
- brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
- brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
- brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
- brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
- brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
- brainscore_vision/models/VOneCORnet_S/model.py +25 -0
- brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
- brainscore_vision/models/VOneCORnet_S/test.py +8 -0
- brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
- brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
- brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
- brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
- brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
- brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
- brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
- brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
- brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
- brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
- brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
- brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
- brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
- brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
- brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
- brainscore_vision/models/antialiased-r50/__init__.py +7 -0
- brainscore_vision/models/antialiased-r50/model.py +62 -0
- brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
- brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
- brainscore_vision/models/antialiased-r50/test.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
- brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
- brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
- brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
- brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
- brainscore_vision/models/cornet_s/model.py +2 -2
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
- brainscore_vision/models/densenet_121/__init__.py +7 -0
- brainscore_vision/models/densenet_121/model.py +63 -0
- brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
- brainscore_vision/models/densenet_121/requirements.txt +1 -0
- brainscore_vision/models/densenet_121/test.py +8 -0
- brainscore_vision/models/densenet_169/__init__.py +7 -0
- brainscore_vision/models/densenet_169/model.py +63 -0
- brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
- brainscore_vision/models/densenet_169/requirements.txt +1 -0
- brainscore_vision/models/densenet_169/test.py +9 -0
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
- brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
- brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
- brainscore_vision/models/densenet_201/test.py +8 -0
- brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b0/model.py +45 -0
- brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
- brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
- brainscore_vision/models/efficientnet_b0/test.py +8 -0
- brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
- brainscore_vision/models/efficientnet_b7/model.py +61 -0
- brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
- brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
- brainscore_vision/models/efficientnet_b7/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
- brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_1/model.py +62 -0
- brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/evresnet_50_1/test.py +8 -0
- brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
- brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4/model.py +67 -0
- brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4/test.py +8 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
- brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
- brainscore_vision/models/grcnn/__init__.py +7 -0
- brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
- brainscore_vision/models/grcnn/model.py +54 -0
- brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
- brainscore_vision/models/grcnn/requirements.txt +2 -0
- brainscore_vision/models/grcnn/test.py +9 -0
- brainscore_vision/models/grcnn_109/__init__.py +5 -0
- brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
- brainscore_vision/models/grcnn_109/model.py +53 -0
- brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
- brainscore_vision/models/grcnn_109/requirements.txt +2 -0
- brainscore_vision/models/grcnn_109/test.py +9 -0
- brainscore_vision/models/hmax/model.py +2 -2
- brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
- brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
- brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
- brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
- brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
- brainscore_vision/models/inception_v1/__init__.py +7 -0
- brainscore_vision/models/inception_v1/model.py +67 -0
- brainscore_vision/models/inception_v1/requirements.txt +1 -0
- brainscore_vision/models/inception_v1/test.py +8 -0
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
- brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
- brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
- brainscore_vision/models/inception_v3/test.py +8 -0
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
- brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
- brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
- brainscore_vision/models/inception_v4/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
- brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
- brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
- brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
- brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
- brainscore_vision/models/nasnet_large/__init__.py +7 -0
- brainscore_vision/models/nasnet_large/model.py +60 -0
- brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
- brainscore_vision/models/nasnet_large/test.py +8 -0
- brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
- brainscore_vision/models/nasnet_mobile/model.py +685 -0
- brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
- brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
- brainscore_vision/models/nasnet_mobile/test.py +8 -0
- brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinB/model.py +79 -0
- brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
- brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
- brainscore_vision/models/omnivore_swinB/test.py +9 -0
- brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
- brainscore_vision/models/omnivore_swinS/model.py +79 -0
- brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
- brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
- brainscore_vision/models/omnivore_swinS/test.py +9 -0
- brainscore_vision/models/pnasnet_large/__init__.py +7 -0
- brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
- brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
- brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large/test.py +8 -0
- brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN/model.py +63 -0
- brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
- brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
- brainscore_vision/models/resnet50_SIN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
- brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
- brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
- brainscore_vision/models/resnet50_barlow/model.py +53 -0
- brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
- brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
- brainscore_vision/models/resnet50_barlow/test.py +9 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
- brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
- brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
- brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
- brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
- brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
- brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
- brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
- brainscore_vision/models/resnet50_sup/__init__.py +5 -0
- brainscore_vision/models/resnet50_sup/model.py +55 -0
- brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
- brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
- brainscore_vision/models/resnet50_sup/test.py +8 -0
- brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
- brainscore_vision/models/resnet50_vicreg/model.py +62 -0
- brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
- brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
- brainscore_vision/models/resnet50_vicreg/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
- brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
- brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
- brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
- brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
- brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
- brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
- brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_101_v1/model.py +42 -0
- brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
- brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_101_v1/test.py +8 -0
- brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_101_v2/model.py +33 -0
- brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
- brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_101_v2/test.py +8 -0
- brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_152_v1/model.py +42 -0
- brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
- brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_152_v1/test.py +8 -0
- brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
- brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
- brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
- brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2/test.py +8 -0
- brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
- brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
- brainscore_vision/models/resnet_18_test_m/model.py +80 -0
- brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
- brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
- brainscore_vision/models/resnet_18_test_m/test.py +8 -0
- brainscore_vision/models/resnet_50_2/__init__.py +9 -0
- brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
- brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
- brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
- brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
- brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
- brainscore_vision/models/resnet_50_2/model.py +46 -0
- brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
- brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
- brainscore_vision/models/resnet_50_2/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/model.py +2 -2
- brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
- brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
- brainscore_vision/models/resnet_50_v1/model.py +42 -0
- brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
- brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
- brainscore_vision/models/resnet_50_v1/test.py +8 -0
- brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
- brainscore_vision/models/resnet_50_v2/model.py +33 -0
- brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
- brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
- brainscore_vision/models/resnet_50_v2/test.py +8 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
- brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
- brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
- brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
- brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
- brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
- brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
- brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
- brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
- brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
- brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
- brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
- brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
- brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
- brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
- brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
- brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
- brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
- brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
- brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
- brainscore_vision/models/timm_models/__init__.py +193 -0
- brainscore_vision/models/timm_models/model.py +90 -0
- brainscore_vision/models/timm_models/model_configs.json +464 -0
- brainscore_vision/models/timm_models/requirements.txt +3 -0
- brainscore_vision/models/timm_models/test.py +0 -0
- brainscore_vision/models/vgg_16/__init__.py +7 -0
- brainscore_vision/models/vgg_16/model.py +52 -0
- brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
- brainscore_vision/models/vgg_16/requirements.txt +1 -0
- brainscore_vision/models/vgg_16/test.py +8 -0
- brainscore_vision/models/vgg_19/__init__.py +7 -0
- brainscore_vision/models/vgg_19/model.py +52 -0
- brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
- brainscore_vision/models/vgg_19/requirements.txt +1 -0
- brainscore_vision/models/vgg_19/test.py +8 -0
- brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
- brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
- brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
- brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
- brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
- brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
- brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
- brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
- brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
- brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
- brainscore_vision/models/voneresnet_50/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50/model.py +37 -0
- brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
- brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50/test.py +8 -0
- brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_1/model.py +68 -0
- brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
- brainscore_vision/models/voneresnet_50_1/test.py +7 -0
- brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_3/model.py +66 -0
- brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_3/test.py +7 -0
- brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
- brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
- brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
- brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
- brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
- brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
- brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
- brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
- brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
- brainscore_vision/models/xception/__init__.py +7 -0
- brainscore_vision/models/xception/model.py +64 -0
- brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
- brainscore_vision/models/xception/requirements.txt +2 -0
- brainscore_vision/models/xception/test.py +8 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -3
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
- docs/source/index.rst +1 -0
- docs/source/modules/submission.rst +1 -1
- docs/source/modules/version_bumping.rst +43 -0
- tests/test_submission/test_actions_helpers.py +2 -6
- brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
- brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
- brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
- /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
- /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
- /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
- /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
- /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+    "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["scsBarlow_lmda_1"] = lambda: ModelCommitment(
+    identifier="scsBarlow_lmda_1",
+    activations_model=get_model("scsBarlow_lmda_1"),
+    layers=get_layers("scsBarlow_lmda_1"),
+)
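Each of these registry entries is lazy: the lambda defers building the ModelCommitment (and downloading weights) until the identifier is actually requested. A minimal sketch of how such an identifier would typically be resolved and scored, assuming brainscore_vision's public load_model/score API; the benchmark identifier below is illustrative, not something this diff prescribes:

import brainscore_vision

# Resolving the identifier invokes the registered lambda, which builds the
# ModelCommitment and triggers the weight download inside get_model().
model = brainscore_vision.load_model("scsBarlow_lmda_1")

# Scoring goes through the same registry; the benchmark identifier here is
# only an example of a registered benchmark.
result = brainscore_vision.score(model_identifier="scsBarlow_lmda_1",
                                 benchmark_identifier="MajajHong2015public.IT-pls")
print(result)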
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["scsBarlow_lmda_1"]
+
+
+def get_model(name):
+    assert name == "scsBarlow_lmda_1"
+    url = "https://users.flatironinstitute.org/~tyerxa/spatial_mmcr/spatial_mmcr/models/imagenet_1k/Barlow/single_crop/lmda_1.0/latest-rank0"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "scsBarlow_lmda_1"
+
+    outs = ["conv1", "layer1", "layer2", "layer3", "layer4", "avgpool", "fc"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
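The load_composer_classifier helper above converts a Composer-style checkpoint (as the function name and the ["state"]["model"] access suggest) into a plain torchvision ResNet-50 state dict: parameters named with lin_cls become the fc classification head, and everything under the trainer's .f. prefix is re-rooted at the backbone. A minimal standalone sketch of that key remapping; the example checkpoint keys are hypothetical, chosen only to illustrate the pattern:

from collections import OrderedDict

def remap_keys(sd):
    # Same logic as load_composer_classifier, minus the model construction:
    # 'lin_cls' parameters map onto torchvision's 'fc' head, and any key
    # containing '.f.' is truncated to the part after the 'f' component.
    new_sd = OrderedDict()
    for k, v in sd.items():
        if 'lin_cls' in k:
            new_sd['fc.' + k.split('.')[-1]] = v
        if '.f.' not in k:
            continue
        parts = k.split('.')
        idx = parts.index('f')
        new_sd['.'.join(parts[idx + 1:])] = v
    return new_sd

# Hypothetical checkpoint keys, for illustration only:
sd = OrderedDict([
    ('model.f.conv1.weight', 0),
    ('model.f.layer1.0.conv1.weight', 1),
    ('model.lin_cls.weight', 2),
])
print(list(remap_keys(sd)))
# ['conv1.weight', 'layer1.0.conv1.weight', 'fc.weight']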
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+    "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["scsBarlow_lmda_2"] = lambda: ModelCommitment(
+    identifier="scsBarlow_lmda_2",
+    activations_model=get_model("scsBarlow_lmda_2"),
+    layers=get_layers("scsBarlow_lmda_2"),
+)
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["scsBarlow_lmda_2"]
+
+
+def get_model(name):
+    assert name == "scsBarlow_lmda_2"
+    url = "https://users.flatironinstitute.org/~tyerxa/spatial_mmcr/spatial_mmcr/models/imagenet_1k/Barlow/single_crop/lmda_2.0/latest-rank0"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "scsBarlow_lmda_2"
+
+    outs = ["conv1", "layer1", "layer2", "layer3", "layer4", "avgpool", "fc"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [
+    "torchvision",
+    "torch",
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["scsBarlow_lmda_4"] = lambda: ModelCommitment(
+    identifier="scsBarlow_lmda_4",
+    activations_model=get_model("scsBarlow_lmda_4"),
+    layers=get_layers("scsBarlow_lmda_4"),
+)
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting ResNet-50 as a pytorch model
+
+# Attention: the wrapper identifier must be unique per model!
+# Otherwise the results will be identical due to brain-score's internal result caching.
+# Please load your pytorch model for use on the CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["scsBarlow_lmda_4"]
+
+
+def get_model(name):
+    assert name == "scsBarlow_lmda_4"
+    url = "https://users.flatironinstitute.org/~tyerxa/spatial_mmcr/spatial_mmcr/models/imagenet_1k/Barlow/single_crop/lmda_4.0/latest-rank0"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1:])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "scsBarlow_lmda_4"
+
+    outs = ["conv1", "layer1", "layer2", "layer3", "layer4", "avgpool", "fc"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [
+    "torchvision",
+    "torch",
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
@@ -0,0 +1,7 @@
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision import model_registry
+from .model import get_layers, get_model
+
+
+model_registry['shufflenet_v2_x1_0'] = \
+    lambda: ModelCommitment(identifier='shufflenet_v2_x1_0', activations_model=get_model('shufflenet_v2_x1_0'), layers=get_layers('shufflenet_v2_x1_0'))
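Note that the registry stores a zero-argument lambda rather than a constructed model, so the ShuffleNet weights are only downloaded when a benchmark actually requests this identifier. A minimal sketch of triggering that instantiation by hand, assuming brainscore_vision is installed:

from brainscore_vision import model_registry

factory = model_registry['shufflenet_v2_x1_0']  # cheap: nothing is loaded yet
model = factory()  # instantiates the ModelCommitment, loading weights now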
@@ -0,0 +1,52 @@
+import functools
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+device = "cpu"
+
+
+def get_model(name):
+    """
+    This method fetches an instance of a base model. The instance has to be callable and return an xarray object
+    containing activations. Standard wrapper implementations exist for common libraries like pytorch and keras;
+    check out the examples folder to see more. For custom implementations, check out the implementation of the
+    wrappers.
+    :param name: the name of the model to fetch
+    :return: the model instance
+    """
+    assert name == 'shufflenet_v2_x1_0'
+    model = torchvision.models.shufflenet_v2_x1_0(pretrained=True)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier='shufflenet_v2_x1_0', model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    assert name == 'shufflenet_v2_x1_0'
+    return ['conv1', 'stage2', 'stage3', 'stage4', 'conv5', 'fc']
+
+
+def get_bibtex(name):
+    """
+    A method returning the bibtex reference of the requested model as a string.
+    """
+    return '''@article{DBLP:journals/corr/abs-1807-11164,
+    title = {ShuffleNet {V2:} Practical Guidelines for Efficient {CNN} Architecture Design},
+    author = {Ningning Ma and Xiangyu Zhang and Hai{-}Tao Zheng and Jian Sun},
+    journal = {CoRR},
+    volume = {abs/1807.11164},
+    year = {2018},
+    url = {http://arxiv.org/abs/1807.11164}
+}'''
+
+
+if __name__ == '__main__':
+    # Use this method to ensure the correctness of the BaseModel implementations.
+    # It executes a mock run of brain-score benchmarks.
+    check_models.check_base_models(__name__)
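Since get_layers hard-codes module names, a quick sanity check (a sketch, not part of the submission) is to confirm that each name resolves to a module on the torchvision model before scoring:

import torchvision.models

# Module names only; no pretrained weights are needed for this check.
model = torchvision.models.shufflenet_v2_x1_0()
available = dict(model.named_modules())
for layer in ['conv1', 'stage2', 'stage3', 'stage4', 'conv5', 'fc']:
    assert layer in available, f"missing module: {layer}"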
@@ -0,0 +1 @@
+{"IT": "stage4", "V4": "stage3", "V1": "stage3", "V2": "stage3"}
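This single-line JSON pins each cortical region to a fixed model layer (stage4 for IT; stage3 for V1, V2, and V4) instead of leaving the region-to-layer assignment to a mapping search. A minimal sketch of reading such a file, assuming a hypothetical local copy named region_layer_map.json:

import json
from pathlib import Path

# Hypothetical file name for illustration; the packaged copy ships beside the model code.
region_layer_map = json.loads(Path("region_layer_map.json").read_text())
assert region_layer_map["IT"] == "stage4"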
@@ -0,0 +1,193 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, MODEL_CONFIGS
+
+model_registry["convnext_base:clip_laiona_augreg_ft_in1k_384"] = lambda: ModelCommitment(
+    identifier="convnext_base:clip_laiona_augreg_ft_in1k_384",
+    activations_model=get_model("convnext_base:clip_laiona_augreg_ft_in1k_384"),
+    layers=MODEL_CONFIGS["convnext_base:clip_laiona_augreg_ft_in1k_384"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_base:clip_laiona_augreg_ft_in1k_384"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_base:clip_laiona_augreg_ft_in1k_384"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["convnext_femto_ols:d1_in1k"] = lambda: ModelCommitment(
+    identifier="convnext_femto_ols:d1_in1k",
+    activations_model=get_model("convnext_femto_ols:d1_in1k"),
+    layers=MODEL_CONFIGS["convnext_femto_ols:d1_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_femto_ols:d1_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_femto_ols:d1_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["convnext_large:fb_in22k_ft_in1k"] = lambda: ModelCommitment(
+    identifier="convnext_large:fb_in22k_ft_in1k",
+    activations_model=get_model("convnext_large:fb_in22k_ft_in1k"),
+    layers=MODEL_CONFIGS["convnext_large:fb_in22k_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_large:fb_in22k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_large:fb_in22k_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"] = lambda: ModelCommitment(
+    identifier="convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384",
+    activations_model=get_model("convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"),
+    layers=MODEL_CONFIGS["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["convnext_tiny:in12k_ft_in1k"] = lambda: ModelCommitment(
+    identifier="convnext_tiny:in12k_ft_in1k",
+    activations_model=get_model("convnext_tiny:in12k_ft_in1k"),
+    layers=MODEL_CONFIGS["convnext_tiny:in12k_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_tiny:in12k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_tiny:in12k_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["convnext_xlarge:fb_in22k_ft_in1k"] = lambda: ModelCommitment(
+    identifier="convnext_xlarge:fb_in22k_ft_in1k",
+    activations_model=get_model("convnext_xlarge:fb_in22k_ft_in1k"),
+    layers=MODEL_CONFIGS["convnext_xlarge:fb_in22k_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_xlarge:fb_in22k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_xlarge:fb_in22k_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["convnext_xxlarge:clip_laion2b_soup_ft_in1k"] = lambda: ModelCommitment(
+    identifier="convnext_xxlarge:clip_laion2b_soup_ft_in1k",
+    activations_model=get_model("convnext_xxlarge:clip_laion2b_soup_ft_in1k"),
+    layers=MODEL_CONFIGS["convnext_xxlarge:clip_laion2b_soup_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["convnext_xxlarge:clip_laion2b_soup_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["convnext_xxlarge:clip_laion2b_soup_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["swin_small_patch4_window7_224:ms_in22k_ft_in1k"] = lambda: ModelCommitment(
+    identifier="swin_small_patch4_window7_224:ms_in22k_ft_in1k",
+    activations_model=get_model("swin_small_patch4_window7_224:ms_in22k_ft_in1k"),
+    layers=MODEL_CONFIGS["swin_small_patch4_window7_224:ms_in22k_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["swin_small_patch4_window7_224:ms_in22k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["swin_small_patch4_window7_224:ms_in22k_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_base_patch16_clip_224:openai_ft_in12k_in1k"] = lambda: ModelCommitment(
+    identifier="vit_base_patch16_clip_224:openai_ft_in12k_in1k",
+    activations_model=get_model("vit_base_patch16_clip_224:openai_ft_in12k_in1k"),
+    layers=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_base_patch16_clip_224:openai_ft_in1k"] = lambda: ModelCommitment(
+    identifier="vit_base_patch16_clip_224:openai_ft_in1k",
+    activations_model=get_model("vit_base_patch16_clip_224:openai_ft_in1k"),
+    layers=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"] = lambda: ModelCommitment(
+    identifier="vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k",
+    activations_model=get_model("vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"),
+    layers=MODEL_CONFIGS["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"] = lambda: ModelCommitment(
+    identifier="vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k",
+    activations_model=get_model("vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"),
+    layers=MODEL_CONFIGS["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"] = lambda: ModelCommitment(
+    identifier="vit_large_patch14_clip_224:laion2b_ft_in12k_in1k",
+    activations_model=get_model("vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"),
+    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_large_patch14_clip_224:laion2b_ft_in1k"] = lambda: ModelCommitment(
+    identifier="vit_large_patch14_clip_224:laion2b_ft_in1k",
+    activations_model=get_model("vit_large_patch14_clip_224:laion2b_ft_in1k"),
+    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_large_patch14_clip_224:openai_ft_in12k_in1k"] = lambda: ModelCommitment(
+    identifier="vit_large_patch14_clip_224:openai_ft_in12k_in1k",
+    activations_model=get_model("vit_large_patch14_clip_224:openai_ft_in12k_in1k"),
+    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_large_patch14_clip_224:openai_ft_in1k"] = lambda: ModelCommitment(
+    identifier="vit_large_patch14_clip_224:openai_ft_in1k",
+    activations_model=get_model("vit_large_patch14_clip_224:openai_ft_in1k"),
+    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_large_patch14_clip_336:laion2b_ft_in1k"] = lambda: ModelCommitment(
+    identifier="vit_large_patch14_clip_336:laion2b_ft_in1k",
+    activations_model=get_model("vit_large_patch14_clip_336:laion2b_ft_in1k"),
+    layers=MODEL_CONFIGS["vit_large_patch14_clip_336:laion2b_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_336:laion2b_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_336:laion2b_ft_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_large_patch14_clip_336:openai_ft_in12k_in1k"] = lambda: ModelCommitment(
+    identifier="vit_large_patch14_clip_336:openai_ft_in12k_in1k",
+    activations_model=get_model("vit_large_patch14_clip_336:openai_ft_in12k_in1k"),
+    layers=MODEL_CONFIGS["vit_large_patch14_clip_336:openai_ft_in12k_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_336:openai_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_336:openai_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_relpos_base_patch16_clsgap_224:sw_in1k"] = lambda: ModelCommitment(
+    identifier="vit_relpos_base_patch16_clsgap_224:sw_in1k",
+    activations_model=get_model("vit_relpos_base_patch16_clsgap_224:sw_in1k"),
+    layers=MODEL_CONFIGS["vit_relpos_base_patch16_clsgap_224:sw_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_relpos_base_patch16_clsgap_224:sw_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_relpos_base_patch16_clsgap_224:sw_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"] = lambda: ModelCommitment(
+    identifier="vit_relpos_base_patch32_plus_rpn_256:sw_in1k",
+    activations_model=get_model("vit_relpos_base_patch32_plus_rpn_256:sw_in1k"),
+    layers=MODEL_CONFIGS["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"]["model_commitment"]["region_layer_map"]
+)
+
+
+model_registry["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"] = lambda: ModelCommitment(
+    identifier="vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k",
+    activations_model=get_model("vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"),
+    layers=MODEL_CONFIGS["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"]["model_commitment"]["layers"],
+    behavioral_readout_layer=MODEL_CONFIGS["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
+    region_layer_map=MODEL_CONFIGS["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"]["model_commitment"]["region_layer_map"]
+)