brainscore-vision 2.2.1__py3-none-any.whl → 2.2.3__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- brainscore_vision/model_helpers/brain_transformation/__init__.py +1 -2
- brainscore_vision/models/alexnet_less_variation_1/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_1/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_1/region_layer_map/alexnet_less_variation_iteration=1.json +6 -0
- brainscore_vision/models/alexnet_less_variation_1/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_1/test.py +3 -0
- brainscore_vision/models/alexnet_less_variation_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_2/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_2/region_layer_map/alexnet_less_variation_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_less_variation_2/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_2/test.py +3 -0
- brainscore_vision/models/alexnet_less_variation_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_4/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_4/region_layer_map/alexnet_less_variation_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_less_variation_4/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_4/test.py +3 -0
- brainscore_vision/models/alexnet_no_specular_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_specular_2/model.py +200 -0
- brainscore_vision/models/alexnet_no_specular_2/region_layer_map/alexnet_no_specular_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_no_specular_2/setup.py +29 -0
- brainscore_vision/models/alexnet_no_specular_2/test.py +3 -0
- brainscore_vision/models/alexnet_no_specular_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_specular_4/model.py +200 -0
- brainscore_vision/models/alexnet_no_specular_4/region_layer_map/alexnet_no_specular_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_no_specular_4/setup.py +29 -0
- brainscore_vision/models/alexnet_no_specular_4/test.py +3 -0
- brainscore_vision/models/alexnet_no_variation_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_variation_4/model.py +200 -0
- brainscore_vision/models/alexnet_no_variation_4/region_layer_map/alexnet_no_variation_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_no_variation_4/setup.py +29 -0
- brainscore_vision/models/alexnet_no_variation_4/test.py +3 -0
- brainscore_vision/models/alexnet_original_3/__init__.py +6 -0
- brainscore_vision/models/alexnet_original_3/model.py +200 -0
- brainscore_vision/models/alexnet_original_3/region_layer_map/alexnet_original_iteration=3.json +6 -0
- brainscore_vision/models/alexnet_original_3/setup.py +29 -0
- brainscore_vision/models/alexnet_original_3/test.py +3 -0
- brainscore_vision/models/alexnet_wo_shading_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_wo_shading_4/model.py +200 -0
- brainscore_vision/models/alexnet_wo_shading_4/region_layer_map/alexnet_wo_shading_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_wo_shading_4/setup.py +29 -0
- brainscore_vision/models/alexnet_wo_shading_4/test.py +3 -0
- brainscore_vision/models/alexnet_wo_shadows_5/__init__.py +6 -0
- brainscore_vision/models/alexnet_wo_shadows_5/model.py +200 -0
- brainscore_vision/models/alexnet_wo_shadows_5/region_layer_map/alexnet_wo_shadows_iteration=5.json +6 -0
- brainscore_vision/models/alexnet_wo_shadows_5/setup.py +29 -0
- brainscore_vision/models/alexnet_wo_shadows_5/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_1/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_1/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_1/region_layer_map/alexnet_z_axis_iteration=1.json +6 -0
- brainscore_vision/models/alexnet_z_axis_1/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_1/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_2/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_2/region_layer_map/alexnet_z_axis_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_z_axis_2/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_2/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_3/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_3/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_3/region_layer_map/alexnet_z_axis_iteration=3.json +6 -0
- brainscore_vision/models/alexnet_z_axis_3/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_3/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_4/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_4/region_layer_map/alexnet_z_axis_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_z_axis_4/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_4/test.py +3 -0
- brainscore_vision/models/artResNet18_1/__init__.py +5 -0
- brainscore_vision/models/artResNet18_1/model.py +66 -0
- brainscore_vision/models/artResNet18_1/requirements.txt +4 -0
- brainscore_vision/models/artResNet18_1/test.py +12 -0
- brainscore_vision/models/barlow_twins_custom/__init__.py +5 -0
- brainscore_vision/models/barlow_twins_custom/model.py +58 -0
- brainscore_vision/models/barlow_twins_custom/requirements.txt +4 -0
- brainscore_vision/models/barlow_twins_custom/test.py +12 -0
- brainscore_vision/models/blt-vs/__init__.py +15 -0
- brainscore_vision/models/blt-vs/model.py +962 -0
- brainscore_vision/models/blt-vs/pretrained.py +219 -0
- brainscore_vision/models/blt-vs/region_layer_map/blt_vs.json +6 -0
- brainscore_vision/models/blt-vs/setup.py +22 -0
- brainscore_vision/models/blt-vs/test.py +0 -0
- brainscore_vision/models/cifar_resnet18_1/__init__.py +5 -0
- brainscore_vision/models/cifar_resnet18_1/model.py +68 -0
- brainscore_vision/models/cifar_resnet18_1/requirements.txt +4 -0
- brainscore_vision/models/cifar_resnet18_1/test.py +10 -0
- brainscore_vision/models/resnet18_random/__init__.py +5 -0
- brainscore_vision/models/resnet18_random/archive_name.zip +0 -0
- brainscore_vision/models/resnet18_random/model.py +42 -0
- brainscore_vision/models/resnet18_random/requirements.txt +2 -0
- brainscore_vision/models/resnet18_random/test.py +12 -0
- brainscore_vision/models/resnet50_less_variation_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_1/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_1/region_layer_map/resnet50_less_variation_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_less_variation_1/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_1/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_2/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_2/region_layer_map/resnet50_less_variation_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_less_variation_2/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_2/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_3/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_3/region_layer_map/resnet50_less_variation_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_less_variation_3/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_3/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_4/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_4/region_layer_map/resnet50_less_variation_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_less_variation_4/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_4/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_5/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_5/region_layer_map/resnet50_less_variation_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_less_variation_5/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_5/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_1/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_1/region_layer_map/resnet50_no_variation_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_no_variation_1/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_1/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_2/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_2/region_layer_map/resnet50_no_variation_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_no_variation_2/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_2/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_5/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_5/region_layer_map/resnet50_no_variation_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_no_variation_5/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_5/test.py +3 -0
- brainscore_vision/models/resnet50_original_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_1/model.py +200 -0
- brainscore_vision/models/resnet50_original_1/region_layer_map/resnet50_original_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_original_1/setup.py +29 -0
- brainscore_vision/models/resnet50_original_1/test.py +3 -0
- brainscore_vision/models/resnet50_original_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_2/model.py +200 -0
- brainscore_vision/models/resnet50_original_2/region_layer_map/resnet50_original_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_original_2/setup.py +29 -0
- brainscore_vision/models/resnet50_original_2/test.py +3 -0
- brainscore_vision/models/resnet50_original_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_5/model.py +200 -0
- brainscore_vision/models/resnet50_original_5/region_layer_map/resnet50_original_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_original_5/setup.py +29 -0
- brainscore_vision/models/resnet50_original_5/test.py +3 -0
- brainscore_vision/models/resnet50_textures_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_1/model.py +200 -0
- brainscore_vision/models/resnet50_textures_1/region_layer_map/resnet50_textures_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_textures_1/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_1/test.py +3 -0
- brainscore_vision/models/resnet50_textures_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_2/model.py +200 -0
- brainscore_vision/models/resnet50_textures_2/region_layer_map/resnet50_textures_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_textures_2/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_2/test.py +3 -0
- brainscore_vision/models/resnet50_textures_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_3/model.py +200 -0
- brainscore_vision/models/resnet50_textures_3/region_layer_map/resnet50_textures_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_textures_3/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_3/test.py +3 -0
- brainscore_vision/models/resnet50_textures_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_4/model.py +200 -0
- brainscore_vision/models/resnet50_textures_4/region_layer_map/resnet50_textures_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_textures_4/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_4/test.py +3 -0
- brainscore_vision/models/resnet50_textures_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_5/model.py +200 -0
- brainscore_vision/models/resnet50_textures_5/region_layer_map/resnet50_textures_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_textures_5/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_5/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_1/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_1/region_layer_map/resnet50_wo_shading_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_1/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_1/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_3/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_3/region_layer_map/resnet50_wo_shading_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_3/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_3/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_4/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_4/region_layer_map/resnet50_wo_shading_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_4/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_4/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shadows_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shadows_4/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shadows_4/region_layer_map/resnet50_wo_shadows_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_wo_shadows_4/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shadows_4/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_1/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_1/region_layer_map/resnet50_z_axis_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_z_axis_1/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_1/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_2/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_2/region_layer_map/resnet50_z_axis_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_z_axis_2/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_2/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_3/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_3/region_layer_map/resnet50_z_axis_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_z_axis_3/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_3/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_5/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_5/region_layer_map/resnet50_z_axis_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_z_axis_5/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_5/test.py +3 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/region_layer_map/yudixie_resnet18_random_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/region_layer_map/yudixie_resnet18_distance_translation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/region_layer_map/yudixie_resnet18_distance_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py +13 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/region_layer_map/yudixie_resnet18_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/region_layer_map/yudixie_resnet18_distance_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/region_layer_map/yudixie_resnet18_category_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/region_layer_map/yudixie_resnet18_object_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/region_layer_map/yudixie_resnet18_cat_obj_class_all_latents_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json +1 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/METADATA +3 -2
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/RECORD +263 -10
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/WHEEL +1 -1
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/__init__.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/model.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/requirements.txt +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/test.py +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,200 @@
|
|
1
|
+
|
2
|
+
from brainscore_vision.model_helpers.check_submission import check_models
|
3
|
+
import functools
|
4
|
+
import numpy as np
|
5
|
+
import torch
|
6
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
|
7
|
+
from PIL import Image
|
8
|
+
from torch import nn
|
9
|
+
import pytorch_lightning as pl
|
10
|
+
import torchvision.models as models
|
11
|
+
import gdown
|
12
|
+
import glob
|
13
|
+
import os
|
14
|
+
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
|
15
|
+
|
16
|
+
def get_bibtex(model_identifier):
    """Return the bibliographic reference string for this model.

    :param model_identifier: ignored; present for the brain-score plugin API.
    """
    # NOTE(review): returns the literal 'VGG16' rather than a bibtex entry,
    # and ignores the resnet50 identifier — looks like a template leftover;
    # confirm against the submission guidelines before changing it.
    return 'VGG16'
|
18
|
+
|
19
|
+
def get_model_list():
    """List the model identifiers this plugin provides."""
    return ['resnet50_z_axis_iteration=2']
|
21
|
+
|
22
|
+
def get_model(name):
    """Build the resnet50 'z_axis' (iteration 2) model wrapped for brain-score.

    Downloads the training checkpoint, instantiates an untrained torchvision
    resnet50, loads the checkpoint weights into it, and wraps it in a
    ``PytorchWrapper`` with standard 224x224 image preprocessing.

    :param name: must be ``'resnet50_z_axis_iteration=2'``
    :return: a ``PytorchWrapper`` activations model
    """
    assert name == 'resnet50_z_axis_iteration=2'
    network = 'resnet50'
    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/z_axis_2.ckpt'
    output = 'resnet50_z_axis_iteration=2.ckpt'
    # Download the checkpoint once (the original downloaded it twice).
    gdown.download(url, output)

    model = torch.hub.load('pytorch/vision', network, pretrained=False)
    ckpt = torch.load(output, map_location='cpu')
    # BUG FIX: the original only called load_state_dict for the 'alexnet'
    # and 'vgg16' branches, so for this resnet50 model the downloaded
    # weights were never applied and the wrapped model stayed randomly
    # initialized. Strip the Lightning 'model.' prefix from the checkpoint
    # keys and load the state dict for this network as well.
    state_dict = {key.split('model.')[1]: value
                  for key, value in ckpt['state_dict'].items()}
    model.load_state_dict(state_dict)

    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    activations_model = PytorchWrapper(identifier=name, model=model,
                                       preprocessing=preprocessing)
    return activations_model
|
86
|
+
|
87
|
+
def get_layers(name):
    """Return the top-level module names of the resnet50 architecture.

    Layer names depend only on the architecture, not on trained weights, so
    the model is instantiated untrained and no checkpoint is needed. The
    original code downloaded and loaded the checkpoint here to no effect,
    and its 'imagenet_trained' branch appended to ``layers`` before the
    list was ever defined (a latent NameError) — both issues are fixed.

    :param name: the model identifier (unused; architecture is fixed)
    :return: list of top-level submodule names of resnet50
    """
    model = torch.hub.load('pytorch/vision', 'resnet50', pretrained=False)
    # model._modules preserves registration order, matching the original's
    # iteration over model._modules.items().
    return list(model._modules)
|
143
|
+
|
144
|
+
if __name__ == '__main__':
    # Running this module directly validates the registered model via the
    # brain-score submission checker. The original script duplicated the
    # entire checkpoint-download / model-construction logic from get_model()
    # here without using the result; check_base_models() already exercises
    # get_model() and get_layers(), so the duplicated dead code is dropped.
    check_models.check_base_models(__name__)
|
@@ -0,0 +1,29 @@
|
|
1
|
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging configuration for this brain-score model submission."""

from setuptools import setup, find_packages

# Runtime dependencies needed by model.py (checkpoint download and
# model construction).
requirements = [
    "torchvision",
    "torch",
    "gdown",
    "pytorch_lightning",
    "brainscore_vision",
]

setup(
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='brain-score template',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    test_suite='tests',
)
|
@@ -0,0 +1,6 @@
|
|
1
|
+
|
2
|
+
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, get_layers

# Register the model under its identifier. Construction is deferred via the
# lambda so weights are only downloaded when the model is actually requested.
model_registry['resnet50_z_axis_iteration=3'] = lambda: ModelCommitment(
    identifier='resnet50_z_axis_iteration=3',
    activations_model=get_model('resnet50_z_axis_iteration=3'),
    layers=get_layers('resnet50_z_axis_iteration=3'),
)
|
@@ -0,0 +1,200 @@
|
|
1
|
+
|
2
|
+
from brainscore_vision.model_helpers.check_submission import check_models
|
3
|
+
import functools
|
4
|
+
import numpy as np
|
5
|
+
import torch
|
6
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
|
7
|
+
from PIL import Image
|
8
|
+
from torch import nn
|
9
|
+
import pytorch_lightning as pl
|
10
|
+
import torchvision.models as models
|
11
|
+
import gdown
|
12
|
+
import glob
|
13
|
+
import os
|
14
|
+
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
|
15
|
+
|
16
|
+
def get_bibtex(model_identifier):
|
17
|
+
return 'VGG16'
|
18
|
+
|
19
|
+
def get_model_list():
|
20
|
+
return ['resnet50_z_axis_iteration=3']
|
21
|
+
|
22
|
+
def get_model(name):
|
23
|
+
keyword = 'z_axis'
|
24
|
+
iteration = 3
|
25
|
+
network = 'resnet50'
|
26
|
+
url = 'https://eggerbernhard.ch/shreya/latest_resnet50/z_axis_3.ckpt'
|
27
|
+
output = 'resnet50_z_axis_iteration=3.ckpt'
|
28
|
+
gdown.download(url, output)
|
29
|
+
|
30
|
+
|
31
|
+
if keyword != 'imagenet_trained' and keyword != 'no_training':
|
32
|
+
lx_whole = [f"resnet50_z_axis_iteration=3.ckpt"]
|
33
|
+
if len(lx_whole) > 1:
|
34
|
+
lx_whole = [lx_whole[-1]]
|
35
|
+
elif keyword == 'imagenet_trained' or keyword == 'no_training':
|
36
|
+
print('keyword is imagenet')
|
37
|
+
lx_whole = ['x']
|
38
|
+
|
39
|
+
for model_ckpt in lx_whole:
|
40
|
+
print(model_ckpt)
|
41
|
+
last_module_name = None
|
42
|
+
last_module = None
|
43
|
+
layers = []
|
44
|
+
if keyword == 'imagenet_trained' and network != 'clip':
|
45
|
+
model = torch.hub.load('pytorch/vision', network, pretrained=True)
|
46
|
+
for name, module in model.named_modules():
|
47
|
+
last_module_name = name
|
48
|
+
last_module = module
|
49
|
+
layers.append(name)
|
50
|
+
else:
|
51
|
+
model = torch.hub.load('pytorch/vision', network, pretrained=False)
|
52
|
+
if model_ckpt != 'x':
|
53
|
+
ckpt = torch.load(model_ckpt, map_location='cpu')
|
54
|
+
if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
|
55
|
+
ckpt2 = {}
|
56
|
+
for keys in ckpt['state_dict']:
|
57
|
+
print(keys)
|
58
|
+
print(ckpt['state_dict'][keys].shape)
|
59
|
+
print('---')
|
60
|
+
k2 = keys.split('model.')[1]
|
61
|
+
ckpt2[k2] = ckpt['state_dict'][keys]
|
62
|
+
model.load_state_dict(ckpt2)
|
63
|
+
if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
|
64
|
+
ckpt2 = {}
|
65
|
+
for keys in ckpt['state_dict']:
|
66
|
+
print(keys)
|
67
|
+
print(ckpt['state_dict'][keys].shape)
|
68
|
+
print('---')
|
69
|
+
k2 = keys.split('model.')[1]
|
70
|
+
ckpt2[k2] = ckpt['state_dict'][keys]
|
71
|
+
model.load_state_dict(ckpt2)
|
72
|
+
# Add more cases for other networks as needed
|
73
|
+
assert name == 'resnet50_z_axis_iteration=3'
|
74
|
+
url = 'https://eggerbernhard.ch/shreya/latest_resnet50/z_axis_3.ckpt'
|
75
|
+
output = 'resnet50_z_axis_iteration=3.ckpt'
|
76
|
+
gdown.download(url, output)
|
77
|
+
layers = []
|
78
|
+
for name, module in model._modules.items():
|
79
|
+
print(name, "->", module)
|
80
|
+
layers.append(name)
|
81
|
+
|
82
|
+
preprocessing = functools.partial(load_preprocess_images, image_size=224)
|
83
|
+
activations_model = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
|
84
|
+
|
85
|
+
return activations_model
|
86
|
+
|
87
|
+
def get_layers(name):
|
88
|
+
keyword = 'z_axis'
|
89
|
+
iteration = 3
|
90
|
+
network = 'resnet50'
|
91
|
+
url = 'https://eggerbernhard.ch/shreya/latest_resnet50/z_axis_3.ckpt'
|
92
|
+
output = 'resnet50_z_axis_iteration=3.ckpt'
|
93
|
+
gdown.download(url, output)
|
94
|
+
|
95
|
+
|
96
|
+
if keyword != 'imagenet_trained' and keyword != 'no_training':
|
97
|
+
lx_whole = [f"resnet50_z_axis_iteration=3.ckpt"]
|
98
|
+
if len(lx_whole) > 1:
|
99
|
+
lx_whole = [lx_whole[-1]]
|
100
|
+
elif keyword == 'imagenet_trained' or keyword == 'no_training':
|
101
|
+
print('keyword is imagenet')
|
102
|
+
lx_whole = ['x']
|
103
|
+
|
104
|
+
|
105
|
+
for model_ckpt in lx_whole:
|
106
|
+
print(model_ckpt)
|
107
|
+
last_module_name = None
|
108
|
+
last_module = None
|
109
|
+
if keyword == 'imagenet_trained' and network != 'clip':
|
110
|
+
model = torch.hub.load('pytorch/vision', network, pretrained=True)
|
111
|
+
for name, module in model.named_modules():
|
112
|
+
last_module_name = name
|
113
|
+
last_module = module
|
114
|
+
layers.append(name)
|
115
|
+
else:
|
116
|
+
model = torch.hub.load('pytorch/vision', network, pretrained=False)
|
117
|
+
if model_ckpt != 'x':
|
118
|
+
ckpt = torch.load(model_ckpt, map_location='cpu')
|
119
|
+
if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
|
120
|
+
ckpt2 = {}
|
121
|
+
for keys in ckpt['state_dict']:
|
122
|
+
print(keys)
|
123
|
+
print(ckpt['state_dict'][keys].shape)
|
124
|
+
print('---')
|
125
|
+
k2 = keys.split('model.')[1]
|
126
|
+
ckpt2[k2] = ckpt['state_dict'][keys]
|
127
|
+
model.load_state_dict(ckpt2)
|
128
|
+
if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
|
129
|
+
ckpt2 = {}
|
130
|
+
for keys in ckpt['state_dict']:
|
131
|
+
print(keys)
|
132
|
+
print(ckpt['state_dict'][keys].shape)
|
133
|
+
print('---')
|
134
|
+
k2 = keys.split('model.')[1]
|
135
|
+
ckpt2[k2] = ckpt['state_dict'][keys]
|
136
|
+
model.load_state_dict(ckpt2)
|
137
|
+
# Add more cases for other networks as needed
|
138
|
+
layers = []
|
139
|
+
for name, module in model._modules.items():
|
140
|
+
print(name, "->", module)
|
141
|
+
layers.append(name)
|
142
|
+
return layers
|
143
|
+
|
144
|
+
if __name__ == '__main__':
|
145
|
+
device = "cpu"
|
146
|
+
global model
|
147
|
+
global keyword
|
148
|
+
global network
|
149
|
+
global iteration
|
150
|
+
keyword = 'z_axis'
|
151
|
+
iteration = 3
|
152
|
+
network = 'resnet50'
|
153
|
+
url = 'https://eggerbernhard.ch/shreya/latest_resnet50/z_axis_3.ckpt'
|
154
|
+
output = 'resnet50_z_axis_iteration=3.ckpt'
|
155
|
+
gdown.download(url, output)
|
156
|
+
|
157
|
+
|
158
|
+
if keyword != 'imagenet_trained' and keyword != 'no_training':
|
159
|
+
lx_whole = [f"resnet50_z_axis_iteration=3.ckpt"]
|
160
|
+
if len(lx_whole) > 1:
|
161
|
+
lx_whole = [lx_whole[-1]]
|
162
|
+
elif keyword == 'imagenet_trained' or keyword == 'no_training':
|
163
|
+
print('keyword is imagenet')
|
164
|
+
lx_whole = ['x']
|
165
|
+
|
166
|
+
for model_ckpt in lx_whole:
|
167
|
+
print(model_ckpt)
|
168
|
+
last_module_name = None
|
169
|
+
last_module = None
|
170
|
+
layers = []
|
171
|
+
if keyword == 'imagenet_trained' and network != 'clip':
|
172
|
+
model = torch.hub.load('pytorch/vision', network, pretrained=True)
|
173
|
+
for name, module in model.named_modules():
|
174
|
+
last_module_name = name
|
175
|
+
last_module = module
|
176
|
+
layers.append(name)
|
177
|
+
else:
|
178
|
+
model = torch.hub.load('pytorch/vision', network, pretrained=False)
|
179
|
+
if model_ckpt != 'x':
|
180
|
+
ckpt = torch.load(model_ckpt, map_location='cpu')
|
181
|
+
if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
|
182
|
+
ckpt2 = {}
|
183
|
+
for keys in ckpt['state_dict']:
|
184
|
+
print(keys)
|
185
|
+
print(ckpt['state_dict'][keys].shape)
|
186
|
+
print('---')
|
187
|
+
k2 = keys.split('model.')[1]
|
188
|
+
ckpt2[k2] = ckpt['state_dict'][keys]
|
189
|
+
model.load_state_dict(ckpt2)
|
190
|
+
if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
|
191
|
+
ckpt2 = {}
|
192
|
+
for keys in ckpt['state_dict']:
|
193
|
+
print(keys)
|
194
|
+
print(ckpt['state_dict'][keys].shape)
|
195
|
+
print('---')
|
196
|
+
k2 = keys.split('model.')[1]
|
197
|
+
ckpt2[k2] = ckpt['state_dict'][keys]
|
198
|
+
model.load_state_dict(ckpt2)
|
199
|
+
# Add more cases for other networks as needed
|
200
|
+
check_models.check_base_models(__name__)
|
@@ -0,0 +1,29 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
|
4
|
+
from setuptools import setup, find_packages
|
5
|
+
|
6
|
+
requirements = [
|
7
|
+
"torchvision",
|
8
|
+
"torch",
|
9
|
+
"gdown",
|
10
|
+
"pytorch_lightning",
|
11
|
+
"brainscore_vision"
|
12
|
+
]
|
13
|
+
|
14
|
+
setup(
|
15
|
+
packages=find_packages(exclude=['tests']),
|
16
|
+
include_package_data=True,
|
17
|
+
install_requires=requirements,
|
18
|
+
license="MIT license",
|
19
|
+
zip_safe=False,
|
20
|
+
keywords='brain-score template',
|
21
|
+
classifiers=[
|
22
|
+
'Development Status :: 2 - Pre-Alpha',
|
23
|
+
'Intended Audience :: Developers',
|
24
|
+
'License :: OSI Approved :: MIT License',
|
25
|
+
'Natural Language :: English',
|
26
|
+
'Programming Language :: Python :: 3.7',
|
27
|
+
],
|
28
|
+
test_suite='tests',
|
29
|
+
)
|
@@ -0,0 +1,6 @@
|
|
1
|
+
|
2
|
+
from brainscore_vision import model_registry
|
3
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
4
|
+
from .model import get_model, get_layers
|
5
|
+
|
6
|
+
model_registry['resnet50_z_axis_iteration=5'] = lambda: ModelCommitment(identifier='resnet50_z_axis_iteration=5', activations_model=get_model('resnet50_z_axis_iteration=5'), layers=get_layers('resnet50_z_axis_iteration=5'))
|