brainscore-vision 2.2.1__py3-none-any.whl → 2.2.3__py3-none-any.whl
- brainscore_vision/model_helpers/brain_transformation/__init__.py +1 -2
- brainscore_vision/models/alexnet_less_variation_1/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_1/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_1/region_layer_map/alexnet_less_variation_iteration=1.json +6 -0
- brainscore_vision/models/alexnet_less_variation_1/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_1/test.py +3 -0
- brainscore_vision/models/alexnet_less_variation_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_2/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_2/region_layer_map/alexnet_less_variation_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_less_variation_2/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_2/test.py +3 -0
- brainscore_vision/models/alexnet_less_variation_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_4/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_4/region_layer_map/alexnet_less_variation_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_less_variation_4/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_4/test.py +3 -0
- brainscore_vision/models/alexnet_no_specular_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_specular_2/model.py +200 -0
- brainscore_vision/models/alexnet_no_specular_2/region_layer_map/alexnet_no_specular_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_no_specular_2/setup.py +29 -0
- brainscore_vision/models/alexnet_no_specular_2/test.py +3 -0
- brainscore_vision/models/alexnet_no_specular_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_specular_4/model.py +200 -0
- brainscore_vision/models/alexnet_no_specular_4/region_layer_map/alexnet_no_specular_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_no_specular_4/setup.py +29 -0
- brainscore_vision/models/alexnet_no_specular_4/test.py +3 -0
- brainscore_vision/models/alexnet_no_variation_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_variation_4/model.py +200 -0
- brainscore_vision/models/alexnet_no_variation_4/region_layer_map/alexnet_no_variation_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_no_variation_4/setup.py +29 -0
- brainscore_vision/models/alexnet_no_variation_4/test.py +3 -0
- brainscore_vision/models/alexnet_original_3/__init__.py +6 -0
- brainscore_vision/models/alexnet_original_3/model.py +200 -0
- brainscore_vision/models/alexnet_original_3/region_layer_map/alexnet_original_iteration=3.json +6 -0
- brainscore_vision/models/alexnet_original_3/setup.py +29 -0
- brainscore_vision/models/alexnet_original_3/test.py +3 -0
- brainscore_vision/models/alexnet_wo_shading_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_wo_shading_4/model.py +200 -0
- brainscore_vision/models/alexnet_wo_shading_4/region_layer_map/alexnet_wo_shading_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_wo_shading_4/setup.py +29 -0
- brainscore_vision/models/alexnet_wo_shading_4/test.py +3 -0
- brainscore_vision/models/alexnet_wo_shadows_5/__init__.py +6 -0
- brainscore_vision/models/alexnet_wo_shadows_5/model.py +200 -0
- brainscore_vision/models/alexnet_wo_shadows_5/region_layer_map/alexnet_wo_shadows_iteration=5.json +6 -0
- brainscore_vision/models/alexnet_wo_shadows_5/setup.py +29 -0
- brainscore_vision/models/alexnet_wo_shadows_5/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_1/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_1/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_1/region_layer_map/alexnet_z_axis_iteration=1.json +6 -0
- brainscore_vision/models/alexnet_z_axis_1/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_1/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_2/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_2/region_layer_map/alexnet_z_axis_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_z_axis_2/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_2/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_3/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_3/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_3/region_layer_map/alexnet_z_axis_iteration=3.json +6 -0
- brainscore_vision/models/alexnet_z_axis_3/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_3/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_4/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_4/region_layer_map/alexnet_z_axis_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_z_axis_4/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_4/test.py +3 -0
- brainscore_vision/models/artResNet18_1/__init__.py +5 -0
- brainscore_vision/models/artResNet18_1/model.py +66 -0
- brainscore_vision/models/artResNet18_1/requirements.txt +4 -0
- brainscore_vision/models/artResNet18_1/test.py +12 -0
- brainscore_vision/models/barlow_twins_custom/__init__.py +5 -0
- brainscore_vision/models/barlow_twins_custom/model.py +58 -0
- brainscore_vision/models/barlow_twins_custom/requirements.txt +4 -0
- brainscore_vision/models/barlow_twins_custom/test.py +12 -0
- brainscore_vision/models/blt-vs/__init__.py +15 -0
- brainscore_vision/models/blt-vs/model.py +962 -0
- brainscore_vision/models/blt-vs/pretrained.py +219 -0
- brainscore_vision/models/blt-vs/region_layer_map/blt_vs.json +6 -0
- brainscore_vision/models/blt-vs/setup.py +22 -0
- brainscore_vision/models/blt-vs/test.py +0 -0
- brainscore_vision/models/cifar_resnet18_1/__init__.py +5 -0
- brainscore_vision/models/cifar_resnet18_1/model.py +68 -0
- brainscore_vision/models/cifar_resnet18_1/requirements.txt +4 -0
- brainscore_vision/models/cifar_resnet18_1/test.py +10 -0
- brainscore_vision/models/resnet18_random/__init__.py +5 -0
- brainscore_vision/models/resnet18_random/archive_name.zip +0 -0
- brainscore_vision/models/resnet18_random/model.py +42 -0
- brainscore_vision/models/resnet18_random/requirements.txt +2 -0
- brainscore_vision/models/resnet18_random/test.py +12 -0
- brainscore_vision/models/resnet50_less_variation_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_1/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_1/region_layer_map/resnet50_less_variation_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_less_variation_1/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_1/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_2/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_2/region_layer_map/resnet50_less_variation_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_less_variation_2/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_2/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_3/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_3/region_layer_map/resnet50_less_variation_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_less_variation_3/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_3/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_4/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_4/region_layer_map/resnet50_less_variation_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_less_variation_4/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_4/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_5/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_5/region_layer_map/resnet50_less_variation_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_less_variation_5/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_5/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_1/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_1/region_layer_map/resnet50_no_variation_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_no_variation_1/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_1/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_2/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_2/region_layer_map/resnet50_no_variation_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_no_variation_2/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_2/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_5/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_5/region_layer_map/resnet50_no_variation_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_no_variation_5/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_5/test.py +3 -0
- brainscore_vision/models/resnet50_original_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_1/model.py +200 -0
- brainscore_vision/models/resnet50_original_1/region_layer_map/resnet50_original_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_original_1/setup.py +29 -0
- brainscore_vision/models/resnet50_original_1/test.py +3 -0
- brainscore_vision/models/resnet50_original_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_2/model.py +200 -0
- brainscore_vision/models/resnet50_original_2/region_layer_map/resnet50_original_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_original_2/setup.py +29 -0
- brainscore_vision/models/resnet50_original_2/test.py +3 -0
- brainscore_vision/models/resnet50_original_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_5/model.py +200 -0
- brainscore_vision/models/resnet50_original_5/region_layer_map/resnet50_original_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_original_5/setup.py +29 -0
- brainscore_vision/models/resnet50_original_5/test.py +3 -0
- brainscore_vision/models/resnet50_textures_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_1/model.py +200 -0
- brainscore_vision/models/resnet50_textures_1/region_layer_map/resnet50_textures_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_textures_1/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_1/test.py +3 -0
- brainscore_vision/models/resnet50_textures_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_2/model.py +200 -0
- brainscore_vision/models/resnet50_textures_2/region_layer_map/resnet50_textures_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_textures_2/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_2/test.py +3 -0
- brainscore_vision/models/resnet50_textures_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_3/model.py +200 -0
- brainscore_vision/models/resnet50_textures_3/region_layer_map/resnet50_textures_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_textures_3/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_3/test.py +3 -0
- brainscore_vision/models/resnet50_textures_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_4/model.py +200 -0
- brainscore_vision/models/resnet50_textures_4/region_layer_map/resnet50_textures_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_textures_4/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_4/test.py +3 -0
- brainscore_vision/models/resnet50_textures_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_5/model.py +200 -0
- brainscore_vision/models/resnet50_textures_5/region_layer_map/resnet50_textures_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_textures_5/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_5/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_1/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_1/region_layer_map/resnet50_wo_shading_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_1/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_1/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_3/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_3/region_layer_map/resnet50_wo_shading_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_3/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_3/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_4/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_4/region_layer_map/resnet50_wo_shading_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_4/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_4/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shadows_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shadows_4/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shadows_4/region_layer_map/resnet50_wo_shadows_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_wo_shadows_4/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shadows_4/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_1/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_1/region_layer_map/resnet50_z_axis_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_z_axis_1/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_1/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_2/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_2/region_layer_map/resnet50_z_axis_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_z_axis_2/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_2/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_3/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_3/region_layer_map/resnet50_z_axis_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_z_axis_3/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_3/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_5/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_5/region_layer_map/resnet50_z_axis_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_z_axis_5/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_5/test.py +3 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/region_layer_map/yudixie_resnet18_random_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/region_layer_map/yudixie_resnet18_distance_translation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/region_layer_map/yudixie_resnet18_distance_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py +13 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/region_layer_map/yudixie_resnet18_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/region_layer_map/yudixie_resnet18_distance_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/region_layer_map/yudixie_resnet18_category_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/region_layer_map/yudixie_resnet18_object_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/region_layer_map/yudixie_resnet18_cat_obj_class_all_latents_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json +1 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/METADATA +3 -2
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/RECORD +263 -10
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/WHEEL +1 -1
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/__init__.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/model.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/requirements.txt +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/test.py +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/top_level.txt +0 -0
brainscore_vision/models/blt-vs/pretrained.py
@@ -0,0 +1,219 @@
+
+import hashlib
+import requests
+from pathlib import Path
+import zipfile
+from collections import OrderedDict
+from warnings import warn
+import json
+import torch
+
+_MODELS = {}
+_ALIASES = {}
+
+
+def get_file(fname, origin, file_hash=None, cache_dir=".cache", cache_subdir="datasets", extract=True):
+    """
+    Download a file from a URL, cache it locally, and optionally verify its hash and extract it.
+
+    Args:
+        fname (str): The name of the file to save locally.
+        origin (str): The URL to download the file from.
+        file_hash (str, optional): The expected hash of the file to verify integrity. Defaults to None.
+        cache_dir (str): The root cache directory. Defaults to ".cache".
+        cache_subdir (str): The subdirectory within the cache directory. Defaults to "datasets".
+        extract (bool): Whether to extract the file if it's a ZIP archive. Defaults to True.
+
+    Returns:
+        str: The path to the cached (and optionally extracted) file.
+    """
+    cache_path = Path(cache_dir) / cache_subdir
+    cache_path.mkdir(parents=True, exist_ok=True)
+
+    file_path = cache_path / fname
+
+    if not file_path.exists():
+        print(f"Downloading {origin} to {file_path}...")
+        response = requests.get(origin, stream=True)
+        response.raise_for_status()
+        with open(file_path, "wb") as f:
+            for chunk in response.iter_content(chunk_size=8192):
+                f.write(chunk)
+        print(f"Download complete: {file_path}")
+
+    if file_hash:
+        print("Verifying file hash...")
+        sha256 = hashlib.sha256()
+        with open(file_path, "rb") as f:
+            for chunk in iter(lambda: f.read(4096), b""):
+                sha256.update(chunk)
+        downloaded_file_hash = sha256.hexdigest()
+        if downloaded_file_hash != file_hash:
+            raise ValueError(f"File hash does not match! Expected {file_hash}, got {downloaded_file_hash}")
+        print("File hash verified.")
+
+    if extract and zipfile.is_zipfile(file_path):
+        extract_path = cache_path
+        json_file = extract_path / f"{fname.replace('.zip', '')}.json"
+        weight_file = extract_path / f"{fname.replace('.zip', '')}.pth"
+        if not json_file.exists() and not weight_file.exists():
+            print(f"Extracting {file_path} to {extract_path}")
+            with zipfile.ZipFile(file_path, "r") as zip_ref:
+                zip_ref.extractall(extract_path)
+            print(f"Extraction complete: {extract_path}")
+
+        return str(extract_path)
+
+    return str(file_path)
+
+
+def clear_models_and_aliases(*cls):
+    if len(cls) == 0:
+        _MODELS.clear()
+        _ALIASES.clear()
+    else:
+        for c in cls:
+            if c in _MODELS:
+                del _MODELS[c]
+            if c in _ALIASES:
+                del _ALIASES[c]
+
+def register_model(cls, key, url, hash):
+    # key must be a valid file/folder name in the file system
+    models = _MODELS.setdefault(cls, OrderedDict())
+    key not in models or warn(
+        "re-registering model '{}' (was already registered for '{}')".format(
+            key, cls.__name__
+        )
+    )
+    models[key] = dict(url=url, hash=hash)
+
+
+def register_aliases(cls, key, *names):
+    # aliases can be arbitrary strings
+    if len(names) == 0:
+        return
+    models = _MODELS.get(cls, {})
+    if key not in models: raise ValueError(f"model '{key}' is not registered for '{cls.__name__}'")
+
+    aliases = _ALIASES.setdefault(cls, OrderedDict())
+    for name in names:
+        aliases.get(name, key) == key or warn(
+            "alias '{}' was previously registered with model '{}' for '{}'".format(
+                name, aliases[name], cls.__name__
+            )
+        )
+        aliases[name] = key
+
+
+def get_registered_models(cls, return_aliases=True, verbose=False):
+    models = _MODELS.get(cls, {})
+    aliases = _ALIASES.get(cls, {})
+    model_keys = tuple(models.keys())
+    model_aliases = {
+        key: tuple(name for name in aliases if aliases[name] == key) for key in models
+    }
+    if verbose:
+        # this code is very messy and should be refactored...
+        _n = len(models)
+        _str_model = "model" if _n == 1 else "models"
+        _str_is_are = "is" if _n == 1 else "are"
+        _str_colon = ":" if _n > 0 else ""
+        print(
+            "There {is_are} {n} registered {model_s} for '{clazz}'{c}".format(
+                n=_n,
+                clazz=cls.__name__,
+                is_are=_str_is_are,
+                model_s=_str_model,
+                c=_str_colon,
+            )
+        )
+        if _n > 0:
+            print()
+            _maxkeylen = 2 + max(len(key) for key in models)
+            print("Name{s}Alias(es)".format(s=" " * (_maxkeylen - 4 + 3)))
+            print("────{s}─────────".format(s=" " * (_maxkeylen - 4 + 3)))
+            for key in models:
+                _aliases = "   "
+                _m = len(model_aliases[key])
+                if _m > 0:
+                    _aliases += "'%s'" % "', '".join(model_aliases[key])
+                else:
+                    _aliases += "None"
+                _key = ("{s:%d}" % _maxkeylen).format(s="'%s'" % key)
+                print(f"{_key}{_aliases}")
+    return (model_keys, model_aliases) if return_aliases else model_keys
+
+
+def get_model_details(cls, key_or_alias, verbose=True):
+    models = _MODELS.get(cls, {})
+
+    if key_or_alias in models:
+        key = key_or_alias
+        alias = None
+    else:
+        aliases = _ALIASES.get(cls, {})
+        alias = key_or_alias
+        if alias not in aliases: raise ValueError(f"'{alias}' is neither a key nor an alias for '{cls.__name__}'")
+        key = aliases[alias]
+    if verbose:
+        print(
+            "Found model '{model}'{alias_str} for '{clazz}'.".format(
+                model=key,
+                clazz=cls.__name__,
+                alias_str=("" if alias is None else " with alias '%s'" % alias),
+            )
+        )
+    return key, alias, models[key]
+
+
+
+def get_model_folder(cls, key_or_alias):
+    key, alias, m = get_model_details(cls, key_or_alias)
+    target = Path("models") / cls.__name__ / key
+    path = Path(
+        get_file(
+            fname=key + ".zip",
+            origin=m["url"],
+            file_hash=m["hash"],
+            cache_subdir=target,
+            extract=True,
+        )
+    )
+
+    assert path.exists() and path.parent.exists()
+    return path.parent
+
+
+
+def get_model_instance(cls, key_or_alias):
+    path = get_model_folder(cls, key_or_alias)
+    json_file = path / key_or_alias / f"{key_or_alias}.json"
+    weight_file = path / key_or_alias / f"{key_or_alias}.pth"
+
+    if not json_file.exists() or not weight_file.exists():
+        raise FileNotFoundError("Required .json or .pth file not found in the model folder.")
+
+    with open(json_file, "r") as f:
+        config = json.load(f)
+
+    timesteps = config.get("timesteps", 1)
+    hook_type = config.get("hook_type", None)
+    bio_unroll = config.get("bio_unroll", False)
+    num_classes = config.get("num_classes", 1)
+
+    model = cls(timesteps=timesteps, hook_type=hook_type, bio_unroll=bio_unroll, num_classes=num_classes)
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    state_dict = torch.load(weight_file, map_location=device)
+
+    filtered_state_dict = {
+        k: v if not (isinstance(v, torch.Tensor) and v.dtype != torch.float64) else v.float()
+        for k, v in state_dict.items()
+        if not any(x in k for x in ["total_ops", "total_params"])
+    }
+
+    model.load_state_dict(filtered_state_dict)
+
+
+    return model
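Taken together, these helpers form a small download-and-cache registry: `register_model` records a URL and hash per model class, `get_model_folder` downloads and unpacks the ZIP via `get_file`, and `get_model_instance` rebuilds the network from the extracted JSON config and `.pth` weights. A minimal usage sketch follows; the `BltVS` class name, URL, and hash are illustrative placeholders, not values from this diff:

    # Hypothetical usage of the helpers above (not part of the package).
    # Assumes a model class BltVS whose constructor accepts the keyword
    # arguments read from the JSON config: timesteps, hook_type,
    # bio_unroll, num_classes.
    register_model(BltVS, 'blt_vs', url='https://example.org/blt_vs.zip', hash='<sha256 of the zip>')
    register_aliases(BltVS, 'blt_vs', 'blt-vs')
    model = get_model_instance(BltVS, 'blt-vs')  # downloads, verifies, extracts, loads weights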
brainscore_vision/models/blt-vs/setup.py
@@ -0,0 +1,22 @@
+from setuptools import setup, find_packages
+
+requirements = [
+    "torchvision",
+    "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/blt-vs/test.py — file without changes
brainscore_vision/models/cifar_resnet18_1/model.py
@@ -0,0 +1,68 @@
+import torch
+from pathlib import Path
+from torchvision.models import resnet18
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from collections import OrderedDict
+from urllib.request import urlretrieve
+import functools
+import os
+
+
+# Custom model loader
+def get_model(name):
+    assert name == 'cifar_resnet18_1'
+    url = "https://www.dropbox.com/scl/fi/maqzcf3j87m7tp4sm1pab/barlow-cifar10-otu5cw89-ep-999.ckpt?rlkey=ou425fqbxxy6pe9lc4mz400mp&st=va93bqox&dl=1"
+    fh, _ = urlretrieve(url)
+    print(f"Downloaded weights file: {fh}, Size: {os.path.getsize(fh)} bytes")
+
+    checkpoint = torch.load(fh, map_location="cpu")
+    state_dict = checkpoint['state_dict']  # Adjust key if necessary
+    # Filter out projector layers and strip the "backbone." prefix
+    backbone_state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items() if not k.startswith("projector.")}
+    # Initialize ResNet18 backbone
+
+    model = resnet18(pretrained=False)
+    model.conv1 = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
+    # print("First conv layer weights BEFORE loading:")
+    # print(model.conv1.weight[0, 0, 0])
+    model.load_state_dict(backbone_state_dict, strict=False)
+    # print(f"Missing keys: {missing_keys}")
+    # print(f"Unexpected keys: {unexpected_keys}")
+    # print("First conv layer weights AFTER loading:")
+    # print(model.conv1.weight[0, 0, 0])
+    # print(model)
+
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+
+    activations_model = PytorchWrapper(identifier='cifar_resnet18_1', model=model, preprocessing=preprocessing)
+
+
+    return ModelCommitment(
+        identifier='cifar_resnet18_1',
+        activations_model=activations_model,
+        layers=['layer1', 'layer2', 'layer3', 'layer4', 'avgpool']
+    )
+
+def get_model_list():
+    return ['cifar_resnet18_1']
+
+# Specify layers to test
+def get_layers(name):
+    assert name == 'cifar_resnet18_1'
+    return ['layer1', 'layer2', 'layer3', 'layer4', 'avgpool']
+
+def get_bibtex(model_identifier):
+    return """
+@misc{resnet18_test_consistency,
+title={ArtResNet18 Barlow Twins},
+author={Claudia Noche},
+year={2024},
+}
+"""
+
+if __name__ == '__main__':
+    from brainscore_vision.model_helpers.check_submission import check_models
+    check_models.check_base_models(__name__)
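The diff lists a five-line `__init__.py` for `cifar_resnet18_1` but does not show its body. Judging by the registration files that are shown later in this diff, it plausibly registers the identifier along these lines (an assumption, not the actual file):

    # Hypothetical brainscore_vision/models/cifar_resnet18_1/__init__.py,
    # modeled on the model_registry pattern shown elsewhere in this diff.
    from brainscore_vision import model_registry
    from .model import get_model

    # get_model already returns a ModelCommitment, so the lambda suffices
    model_registry['cifar_resnet18_1'] = lambda: get_model('cifar_resnet18_1')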
brainscore_vision/models/resnet18_random/archive_name.zip — binary file
brainscore_vision/models/resnet18_random/model.py
@@ -0,0 +1,42 @@
+import torch
+from torchvision.models import resnet18
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+import functools
+
+# Define preprocessing (resize to 224x224 as required by ResNet)
+preprocessing = functools.partial(load_preprocess_images, image_size=224)
+
+# Define ResNet18 with random weights
+def get_model(name):
+    assert name == 'resnet18_random'
+    # Load ResNet18 without pre-trained weights
+    model = resnet18(pretrained=False)
+    # Wrap the model with Brain-Score's PytorchWrapper
+    activations_model = PytorchWrapper(identifier='resnet18_random', model=model, preprocessing=preprocessing)
+    return ModelCommitment(
+        identifier='resnet18_random',
+        activations_model=activations_model,
+        # Specify layers for evaluation
+        layers=['layer1', 'layer2', 'layer3', 'layer4', 'avgpool']
+    )
+
+# Specify layers to test
+def get_layers(name):
+    assert name == 'resnet18_random'
+    return ['layer1', 'layer2', 'layer3', 'layer4', 'avgpool']
+
+# Optional: Provide a BibTeX reference for the model
+def get_bibtex(model_identifier):
+    return """
+@misc{resnet18_test_consistency,
+title={ResNet18 with Random Weights},
+author={Clear Glue},
+year={2024},
+}
+"""
+
+if __name__ == '__main__':
+    from brainscore_vision.model_helpers.check_submission import check_models
+    check_models.check_base_models(__name__)
brainscore_vision/models/resnet18_random/test.py
@@ -0,0 +1,12 @@
+import pytest
+import brainscore_vision
+
+@pytest.mark.travis_slow
+def test_resnet18_random():
+    model = brainscore_vision.load_model('resnet18_random')
+    assert model.identifier == 'resnet18_random'
+
+
+
+# AssertionError: No registrations found for resnet18_random
+# ⚡ master ~/vision python -m brainscore_vision score --model_identifier='resnet50_tutorial' --benchmark_identifier='MajajHong2015public.IT-pls'
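The trailing comments record the failure mode this test guards against: `brainscore_vision.load_model` resolves identifiers through `model_registry`, so if the plugin's `__init__.py` never registers `resnet18_random`, the lookup fails with "No registrations found". A registration in the style used elsewhere in this diff would resolve it; a sketch (the plugin's actual five-line `__init__.py` is listed in this diff but not shown):

    # Hypothetical brainscore_vision/models/resnet18_random/__init__.py.
    from brainscore_vision import model_registry
    from .model import get_model

    # get_model returns a ModelCommitment, so the lambda is enough
    model_registry['resnet18_random'] = lambda: get_model('resnet18_random')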
brainscore_vision/models/resnet50_less_variation_1/__init__.py
@@ -0,0 +1,6 @@
+
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry['resnet50_less_variation_iteration=1'] = lambda: ModelCommitment(identifier='resnet50_less_variation_iteration=1', activations_model=get_model('resnet50_less_variation_iteration=1'), layers=get_layers('resnet50_less_variation_iteration=1'))
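Because the registry value is a zero-argument lambda, the checkpoint download inside `get_model` and `get_layers` is deferred until the identifier is first requested:

    # Lazy resolution: nothing is downloaded at import time; the lambda
    # above runs only when the identifier is looked up.
    import brainscore_vision
    model = brainscore_vision.load_model('resnet50_less_variation_iteration=1')
    assert model.identifier == 'resnet50_less_variation_iteration=1'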
brainscore_vision/models/resnet50_less_variation_1/model.py
@@ -0,0 +1,200 @@
+
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import numpy as np
+import torch
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from PIL import Image
+from torch import nn
+import pytorch_lightning as pl
+import torchvision.models as models
+import gdown
+import glob
+import os
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+
+def get_bibtex(model_identifier):
+    return 'VGG16'
+
+def get_model_list():
+    return ['resnet50_less_variation_iteration=1']
+
+def get_model(name):
+    keyword = 'less_variation'
+    iteration = 1
+    network = 'resnet50'
+    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_1.ckpt'
+    output = 'resnet50_less_variation_iteration=1.ckpt'
+    gdown.download(url, output)
+
+
+    if keyword != 'imagenet_trained' and keyword != 'no_training':
+        lx_whole = [f"resnet50_less_variation_iteration=1.ckpt"]
+        if len(lx_whole) > 1:
+            lx_whole = [lx_whole[-1]]
+    elif keyword == 'imagenet_trained' or keyword == 'no_training':
+        print('keyword is imagenet')
+        lx_whole = ['x']
+
+    for model_ckpt in lx_whole:
+        print(model_ckpt)
+        last_module_name = None
+        last_module = None
+        layers = []
+        if keyword == 'imagenet_trained' and network != 'clip':
+            model = torch.hub.load('pytorch/vision', network, pretrained=True)
+            for module_name, module in model.named_modules():
+                last_module_name = module_name
+                last_module = module
+                layers.append(module_name)
+        else:
+            model = torch.hub.load('pytorch/vision', network, pretrained=False)
+            if model_ckpt != 'x':
+                ckpt = torch.load(model_ckpt, map_location='cpu')
+            if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                ckpt2 = {}
+                for keys in ckpt['state_dict']:
+                    print(keys)
+                    print(ckpt['state_dict'][keys].shape)
+                    print('---')
+                    k2 = keys.split('model.')[1]
+                    ckpt2[k2] = ckpt['state_dict'][keys]
+                model.load_state_dict(ckpt2)
+            if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                ckpt2 = {}
+                for keys in ckpt['state_dict']:
+                    print(keys)
+                    print(ckpt['state_dict'][keys].shape)
+                    print('---')
+                    k2 = keys.split('model.')[1]
+                    ckpt2[k2] = ckpt['state_dict'][keys]
+                model.load_state_dict(ckpt2)
+            # Add more cases for other networks as needed
+    assert name == 'resnet50_less_variation_iteration=1'
+    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_1.ckpt'
+    output = 'resnet50_less_variation_iteration=1.ckpt'
+    gdown.download(url, output)
+    layers = []
+    for layer_name, module in model._modules.items():
+        print(layer_name, "->", module)
+        layers.append(layer_name)
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    activations_model = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+
+    return activations_model
+
+def get_layers(name):
+    keyword = 'less_variation'
+    iteration = 1
+    network = 'resnet50'
+    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_1.ckpt'
+    output = 'resnet50_less_variation_iteration=1.ckpt'
+    gdown.download(url, output)
+
+
+    if keyword != 'imagenet_trained' and keyword != 'no_training':
+        lx_whole = [f"resnet50_less_variation_iteration=1.ckpt"]
+        if len(lx_whole) > 1:
+            lx_whole = [lx_whole[-1]]
+    elif keyword == 'imagenet_trained' or keyword == 'no_training':
+        print('keyword is imagenet')
+        lx_whole = ['x']
+
+
+    for model_ckpt in lx_whole:
+        print(model_ckpt)
+        last_module_name = None
+        last_module = None
+        if keyword == 'imagenet_trained' and network != 'clip':
+            model = torch.hub.load('pytorch/vision', network, pretrained=True)
+            for module_name, module in model.named_modules():
+                last_module_name = module_name
+                last_module = module
+                layers.append(module_name)
+        else:
+            model = torch.hub.load('pytorch/vision', network, pretrained=False)
+            if model_ckpt != 'x':
+                ckpt = torch.load(model_ckpt, map_location='cpu')
+            if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                ckpt2 = {}
+                for keys in ckpt['state_dict']:
+                    print(keys)
+                    print(ckpt['state_dict'][keys].shape)
+                    print('---')
+                    k2 = keys.split('model.')[1]
+                    ckpt2[k2] = ckpt['state_dict'][keys]
+                model.load_state_dict(ckpt2)
+            if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                ckpt2 = {}
+                for keys in ckpt['state_dict']:
+                    print(keys)
+                    print(ckpt['state_dict'][keys].shape)
+                    print('---')
+                    k2 = keys.split('model.')[1]
+                    ckpt2[k2] = ckpt['state_dict'][keys]
+                model.load_state_dict(ckpt2)
+            # Add more cases for other networks as needed
+    layers = []
+    for layer_name, module in model._modules.items():
+        print(layer_name, "->", module)
+        layers.append(layer_name)
+    return layers
+
+if __name__ == '__main__':
+    device = "cpu"
+    global model
+    global keyword
+    global network
+    global iteration
+    keyword = 'less_variation'
+    iteration = 1
+    network = 'resnet50'
+    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_1.ckpt'
+    output = 'resnet50_less_variation_iteration=1.ckpt'
+    gdown.download(url, output)
+
+
+    if keyword != 'imagenet_trained' and keyword != 'no_training':
+        lx_whole = [f"resnet50_less_variation_iteration=1.ckpt"]
+        if len(lx_whole) > 1:
+            lx_whole = [lx_whole[-1]]
+    elif keyword == 'imagenet_trained' or keyword == 'no_training':
+        print('keyword is imagenet')
+        lx_whole = ['x']
+
+    for model_ckpt in lx_whole:
+        print(model_ckpt)
+        last_module_name = None
+        last_module = None
+        layers = []
+        if keyword == 'imagenet_trained' and network != 'clip':
+            model = torch.hub.load('pytorch/vision', network, pretrained=True)
+            for module_name, module in model.named_modules():
+                last_module_name = module_name
+                last_module = module
+                layers.append(module_name)
+        else:
+            model = torch.hub.load('pytorch/vision', network, pretrained=False)
+            if model_ckpt != 'x':
+                ckpt = torch.load(model_ckpt, map_location='cpu')
+            if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                ckpt2 = {}
+                for keys in ckpt['state_dict']:
+                    print(keys)
+                    print(ckpt['state_dict'][keys].shape)
+                    print('---')
+                    k2 = keys.split('model.')[1]
+                    ckpt2[k2] = ckpt['state_dict'][keys]
+                model.load_state_dict(ckpt2)
+            if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                ckpt2 = {}
+                for keys in ckpt['state_dict']:
+                    print(keys)
+                    print(ckpt['state_dict'][keys].shape)
+                    print('---')
+                    k2 = keys.split('model.')[1]
+                    ckpt2[k2] = ckpt['state_dict'][keys]
+                model.load_state_dict(ckpt2)
+            # Add more cases for other networks as needed
+    check_models.check_base_models(__name__)
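Every checkpoint-loading branch in this file applies the same remapping: a pytorch_lightning checkpoint stores its parameters under `state_dict` with a `model.` prefix, which must be stripped before `load_state_dict` on a bare torchvision network accepts the keys. A standalone sketch of that idiom (`checkpoint.ckpt` is a placeholder path, not a file from this diff):

    # Minimal sketch of the key-remapping used above: strip the "model."
    # prefix that the Lightning wrapper adds to every parameter name.
    import torch

    ckpt = torch.load('checkpoint.ckpt', map_location='cpu')
    remapped = {key.split('model.', 1)[1]: value
                for key, value in ckpt['state_dict'].items()
                if key.startswith('model.')}
    # remapped can now be loaded into a bare torchvision model:
    # model.load_state_dict(remapped)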
brainscore_vision/models/resnet50_less_variation_1/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [
+    "torchvision",
+    "torch",
+    "gdown",
+    "pytorch_lightning",
+    "brainscore_vision"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/resnet50_less_variation_2/__init__.py
@@ -0,0 +1,6 @@
+
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry['resnet50_less_variation_iteration=2'] = lambda: ModelCommitment(identifier='resnet50_less_variation_iteration=2', activations_model=get_model('resnet50_less_variation_iteration=2'), layers=get_layers('resnet50_less_variation_iteration=2'))