brainscore-vision 2.2.1__py3-none-any.whl → 2.2.3__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- brainscore_vision/model_helpers/brain_transformation/__init__.py +1 -2
- brainscore_vision/models/alexnet_less_variation_1/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_1/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_1/region_layer_map/alexnet_less_variation_iteration=1.json +6 -0
- brainscore_vision/models/alexnet_less_variation_1/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_1/test.py +3 -0
- brainscore_vision/models/alexnet_less_variation_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_2/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_2/region_layer_map/alexnet_less_variation_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_less_variation_2/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_2/test.py +3 -0
- brainscore_vision/models/alexnet_less_variation_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_less_variation_4/model.py +200 -0
- brainscore_vision/models/alexnet_less_variation_4/region_layer_map/alexnet_less_variation_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_less_variation_4/setup.py +29 -0
- brainscore_vision/models/alexnet_less_variation_4/test.py +3 -0
- brainscore_vision/models/alexnet_no_specular_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_specular_2/model.py +200 -0
- brainscore_vision/models/alexnet_no_specular_2/region_layer_map/alexnet_no_specular_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_no_specular_2/setup.py +29 -0
- brainscore_vision/models/alexnet_no_specular_2/test.py +3 -0
- brainscore_vision/models/alexnet_no_specular_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_specular_4/model.py +200 -0
- brainscore_vision/models/alexnet_no_specular_4/region_layer_map/alexnet_no_specular_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_no_specular_4/setup.py +29 -0
- brainscore_vision/models/alexnet_no_specular_4/test.py +3 -0
- brainscore_vision/models/alexnet_no_variation_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_no_variation_4/model.py +200 -0
- brainscore_vision/models/alexnet_no_variation_4/region_layer_map/alexnet_no_variation_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_no_variation_4/setup.py +29 -0
- brainscore_vision/models/alexnet_no_variation_4/test.py +3 -0
- brainscore_vision/models/alexnet_original_3/__init__.py +6 -0
- brainscore_vision/models/alexnet_original_3/model.py +200 -0
- brainscore_vision/models/alexnet_original_3/region_layer_map/alexnet_original_iteration=3.json +6 -0
- brainscore_vision/models/alexnet_original_3/setup.py +29 -0
- brainscore_vision/models/alexnet_original_3/test.py +3 -0
- brainscore_vision/models/alexnet_wo_shading_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_wo_shading_4/model.py +200 -0
- brainscore_vision/models/alexnet_wo_shading_4/region_layer_map/alexnet_wo_shading_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_wo_shading_4/setup.py +29 -0
- brainscore_vision/models/alexnet_wo_shading_4/test.py +3 -0
- brainscore_vision/models/alexnet_wo_shadows_5/__init__.py +6 -0
- brainscore_vision/models/alexnet_wo_shadows_5/model.py +200 -0
- brainscore_vision/models/alexnet_wo_shadows_5/region_layer_map/alexnet_wo_shadows_iteration=5.json +6 -0
- brainscore_vision/models/alexnet_wo_shadows_5/setup.py +29 -0
- brainscore_vision/models/alexnet_wo_shadows_5/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_1/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_1/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_1/region_layer_map/alexnet_z_axis_iteration=1.json +6 -0
- brainscore_vision/models/alexnet_z_axis_1/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_1/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_2/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_2/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_2/region_layer_map/alexnet_z_axis_iteration=2.json +6 -0
- brainscore_vision/models/alexnet_z_axis_2/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_2/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_3/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_3/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_3/region_layer_map/alexnet_z_axis_iteration=3.json +6 -0
- brainscore_vision/models/alexnet_z_axis_3/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_3/test.py +3 -0
- brainscore_vision/models/alexnet_z_axis_4/__init__.py +6 -0
- brainscore_vision/models/alexnet_z_axis_4/model.py +200 -0
- brainscore_vision/models/alexnet_z_axis_4/region_layer_map/alexnet_z_axis_iteration=4.json +6 -0
- brainscore_vision/models/alexnet_z_axis_4/setup.py +29 -0
- brainscore_vision/models/alexnet_z_axis_4/test.py +3 -0
- brainscore_vision/models/artResNet18_1/__init__.py +5 -0
- brainscore_vision/models/artResNet18_1/model.py +66 -0
- brainscore_vision/models/artResNet18_1/requirements.txt +4 -0
- brainscore_vision/models/artResNet18_1/test.py +12 -0
- brainscore_vision/models/barlow_twins_custom/__init__.py +5 -0
- brainscore_vision/models/barlow_twins_custom/model.py +58 -0
- brainscore_vision/models/barlow_twins_custom/requirements.txt +4 -0
- brainscore_vision/models/barlow_twins_custom/test.py +12 -0
- brainscore_vision/models/blt-vs/__init__.py +15 -0
- brainscore_vision/models/blt-vs/model.py +962 -0
- brainscore_vision/models/blt-vs/pretrained.py +219 -0
- brainscore_vision/models/blt-vs/region_layer_map/blt_vs.json +6 -0
- brainscore_vision/models/blt-vs/setup.py +22 -0
- brainscore_vision/models/blt-vs/test.py +0 -0
- brainscore_vision/models/cifar_resnet18_1/__init__.py +5 -0
- brainscore_vision/models/cifar_resnet18_1/model.py +68 -0
- brainscore_vision/models/cifar_resnet18_1/requirements.txt +4 -0
- brainscore_vision/models/cifar_resnet18_1/test.py +10 -0
- brainscore_vision/models/resnet18_random/__init__.py +5 -0
- brainscore_vision/models/resnet18_random/archive_name.zip +0 -0
- brainscore_vision/models/resnet18_random/model.py +42 -0
- brainscore_vision/models/resnet18_random/requirements.txt +2 -0
- brainscore_vision/models/resnet18_random/test.py +12 -0
- brainscore_vision/models/resnet50_less_variation_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_1/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_1/region_layer_map/resnet50_less_variation_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_less_variation_1/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_1/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_2/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_2/region_layer_map/resnet50_less_variation_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_less_variation_2/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_2/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_3/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_3/region_layer_map/resnet50_less_variation_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_less_variation_3/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_3/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_4/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_4/region_layer_map/resnet50_less_variation_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_less_variation_4/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_4/test.py +3 -0
- brainscore_vision/models/resnet50_less_variation_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_less_variation_5/model.py +200 -0
- brainscore_vision/models/resnet50_less_variation_5/region_layer_map/resnet50_less_variation_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_less_variation_5/setup.py +29 -0
- brainscore_vision/models/resnet50_less_variation_5/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_1/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_1/region_layer_map/resnet50_no_variation_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_no_variation_1/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_1/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_2/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_2/region_layer_map/resnet50_no_variation_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_no_variation_2/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_2/test.py +3 -0
- brainscore_vision/models/resnet50_no_variation_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_no_variation_5/model.py +200 -0
- brainscore_vision/models/resnet50_no_variation_5/region_layer_map/resnet50_no_variation_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_no_variation_5/setup.py +29 -0
- brainscore_vision/models/resnet50_no_variation_5/test.py +3 -0
- brainscore_vision/models/resnet50_original_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_1/model.py +200 -0
- brainscore_vision/models/resnet50_original_1/region_layer_map/resnet50_original_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_original_1/setup.py +29 -0
- brainscore_vision/models/resnet50_original_1/test.py +3 -0
- brainscore_vision/models/resnet50_original_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_2/model.py +200 -0
- brainscore_vision/models/resnet50_original_2/region_layer_map/resnet50_original_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_original_2/setup.py +29 -0
- brainscore_vision/models/resnet50_original_2/test.py +3 -0
- brainscore_vision/models/resnet50_original_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_original_5/model.py +200 -0
- brainscore_vision/models/resnet50_original_5/region_layer_map/resnet50_original_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_original_5/setup.py +29 -0
- brainscore_vision/models/resnet50_original_5/test.py +3 -0
- brainscore_vision/models/resnet50_textures_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_1/model.py +200 -0
- brainscore_vision/models/resnet50_textures_1/region_layer_map/resnet50_textures_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_textures_1/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_1/test.py +3 -0
- brainscore_vision/models/resnet50_textures_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_2/model.py +200 -0
- brainscore_vision/models/resnet50_textures_2/region_layer_map/resnet50_textures_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_textures_2/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_2/test.py +3 -0
- brainscore_vision/models/resnet50_textures_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_3/model.py +200 -0
- brainscore_vision/models/resnet50_textures_3/region_layer_map/resnet50_textures_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_textures_3/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_3/test.py +3 -0
- brainscore_vision/models/resnet50_textures_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_4/model.py +200 -0
- brainscore_vision/models/resnet50_textures_4/region_layer_map/resnet50_textures_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_textures_4/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_4/test.py +3 -0
- brainscore_vision/models/resnet50_textures_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_textures_5/model.py +200 -0
- brainscore_vision/models/resnet50_textures_5/region_layer_map/resnet50_textures_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_textures_5/setup.py +29 -0
- brainscore_vision/models/resnet50_textures_5/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_1/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_1/region_layer_map/resnet50_wo_shading_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_1/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_1/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_3/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_3/region_layer_map/resnet50_wo_shading_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_3/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_3/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shading_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shading_4/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shading_4/region_layer_map/resnet50_wo_shading_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_wo_shading_4/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shading_4/test.py +3 -0
- brainscore_vision/models/resnet50_wo_shadows_4/__init__.py +6 -0
- brainscore_vision/models/resnet50_wo_shadows_4/model.py +200 -0
- brainscore_vision/models/resnet50_wo_shadows_4/region_layer_map/resnet50_wo_shadows_iteration=4.json +6 -0
- brainscore_vision/models/resnet50_wo_shadows_4/setup.py +29 -0
- brainscore_vision/models/resnet50_wo_shadows_4/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_1/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_1/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_1/region_layer_map/resnet50_z_axis_iteration=1.json +6 -0
- brainscore_vision/models/resnet50_z_axis_1/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_1/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_2/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_2/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_2/region_layer_map/resnet50_z_axis_iteration=2.json +6 -0
- brainscore_vision/models/resnet50_z_axis_2/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_2/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_3/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_3/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_3/region_layer_map/resnet50_z_axis_iteration=3.json +6 -0
- brainscore_vision/models/resnet50_z_axis_3/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_3/test.py +3 -0
- brainscore_vision/models/resnet50_z_axis_5/__init__.py +6 -0
- brainscore_vision/models/resnet50_z_axis_5/model.py +200 -0
- brainscore_vision/models/resnet50_z_axis_5/region_layer_map/resnet50_z_axis_iteration=5.json +6 -0
- brainscore_vision/models/resnet50_z_axis_5/setup.py +29 -0
- brainscore_vision/models/resnet50_z_axis_5/test.py +3 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/region_layer_map/yudixie_resnet18_random_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/region_layer_map/yudixie_resnet18_distance_translation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/region_layer_map/yudixie_resnet18_distance_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py +13 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/region_layer_map/yudixie_resnet18_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/region_layer_map/yudixie_resnet18_distance_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/region_layer_map/yudixie_resnet18_category_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/region_layer_map/yudixie_resnet18_object_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/region_layer_map/yudixie_resnet18_cat_obj_class_all_latents_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json +1 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/METADATA +3 -2
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/RECORD +263 -10
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/WHEEL +1 -1
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/__init__.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/model.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/requirements.txt +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/test.py +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,25 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
|
4
|
+
from setuptools import setup, find_packages
|
5
|
+
|
6
|
+
requirements = [ "torchvision",
|
7
|
+
"torch"
|
8
|
+
]
|
9
|
+
|
10
|
+
setup(
|
11
|
+
packages=find_packages(exclude=['tests']),
|
12
|
+
include_package_data=True,
|
13
|
+
install_requires=requirements,
|
14
|
+
license="MIT license",
|
15
|
+
zip_safe=False,
|
16
|
+
keywords='brain-score template',
|
17
|
+
classifiers=[
|
18
|
+
'Development Status :: 2 - Pre-Alpha',
|
19
|
+
'Intended Audience :: Developers',
|
20
|
+
'License :: OSI Approved :: MIT License',
|
21
|
+
'Natural Language :: English',
|
22
|
+
'Programming Language :: Python :: 3.7',
|
23
|
+
],
|
24
|
+
test_suite='tests',
|
25
|
+
)
|
@@ -0,0 +1 @@
|
|
1
|
+
# Left empty as part of 2023 models migration
|
@@ -0,0 +1,12 @@
|
|
1
|
+
from brainscore_vision import model_registry
|
2
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
3
|
+
from .model import get_model, get_layers
|
4
|
+
|
5
|
+
|
6
|
+
def commit_model(identifier):
|
7
|
+
return ModelCommitment(identifier=identifier,
|
8
|
+
activations_model=get_model(identifier),
|
9
|
+
layers=get_layers(identifier))
|
10
|
+
|
11
|
+
model_registry['yudixie_resnet18_distance_translation_rotation_0_240719'] = lambda: commit_model('yudixie_resnet18_distance_translation_rotation_0_240719')
|
12
|
+
|
@@ -0,0 +1,60 @@
|
|
1
|
+
import os
|
2
|
+
from pathlib import Path
|
3
|
+
import functools
|
4
|
+
from urllib.request import urlretrieve
|
5
|
+
|
6
|
+
import numpy as np
|
7
|
+
import torch
|
8
|
+
import torch.nn as nn
|
9
|
+
from torchvision.models import resnet18
|
10
|
+
|
11
|
+
from brainscore_vision.model_helpers.check_submission import check_models
|
12
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
13
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
|
14
|
+
|
15
|
+
|
16
|
+
# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
|
17
|
+
# If the model requires a GPU, contact the brain-score team directly.
|
18
|
+
|
19
|
+
|
20
|
+
def get_model(name):
|
21
|
+
pytorch_device = torch.device('cpu')
|
22
|
+
|
23
|
+
weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
|
24
|
+
fh = urlretrieve(weigth_url, f'{name}_weights.pth')
|
25
|
+
load_path = fh[0]
|
26
|
+
|
27
|
+
pytorch_model = resnet18()
|
28
|
+
pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
|
29
|
+
pytorch_model = pytorch_model.to(pytorch_device)
|
30
|
+
|
31
|
+
# load model from saved weights
|
32
|
+
saved_state_dict = torch.load(load_path, map_location=pytorch_device)
|
33
|
+
state_dict = {}
|
34
|
+
for k, v in saved_state_dict.items():
|
35
|
+
if k.startswith('_orig_mod.'):
|
36
|
+
# for compiled models
|
37
|
+
state_dict[k[10:]] = v
|
38
|
+
else:
|
39
|
+
state_dict[k] = v
|
40
|
+
pytorch_model.load_state_dict(state_dict, strict=True)
|
41
|
+
print(f'Loaded model from {load_path}')
|
42
|
+
|
43
|
+
preprocessing = functools.partial(load_preprocess_images, image_size=224)
|
44
|
+
wrapper = PytorchWrapper(identifier=name,
|
45
|
+
model=pytorch_model,
|
46
|
+
preprocessing=preprocessing)
|
47
|
+
wrapper.image_size = 224
|
48
|
+
return wrapper
|
49
|
+
|
50
|
+
|
51
|
+
def get_layers(name):
|
52
|
+
return ['conv1','layer1', 'layer2', 'layer3', 'layer4', 'fc']
|
53
|
+
|
54
|
+
|
55
|
+
def get_bibtex(model_identifier):
|
56
|
+
return """xx"""
|
57
|
+
|
58
|
+
|
59
|
+
if __name__ == '__main__':
|
60
|
+
check_models.check_base_models(__name__)
|
@@ -0,0 +1,25 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
|
4
|
+
from setuptools import setup, find_packages
|
5
|
+
|
6
|
+
requirements = [ "torchvision",
|
7
|
+
"torch"
|
8
|
+
]
|
9
|
+
|
10
|
+
setup(
|
11
|
+
packages=find_packages(exclude=['tests']),
|
12
|
+
include_package_data=True,
|
13
|
+
install_requires=requirements,
|
14
|
+
license="MIT license",
|
15
|
+
zip_safe=False,
|
16
|
+
keywords='brain-score template',
|
17
|
+
classifiers=[
|
18
|
+
'Development Status :: 2 - Pre-Alpha',
|
19
|
+
'Intended Audience :: Developers',
|
20
|
+
'License :: OSI Approved :: MIT License',
|
21
|
+
'Natural Language :: English',
|
22
|
+
'Programming Language :: Python :: 3.7',
|
23
|
+
],
|
24
|
+
test_suite='tests',
|
25
|
+
)
|
@@ -0,0 +1 @@
|
|
1
|
+
# Left empty as part of 2023 models migration
|
@@ -0,0 +1,11 @@
|
|
1
|
+
from brainscore_vision import model_registry
|
2
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
3
|
+
from .model import get_model, get_layers
|
4
|
+
|
5
|
+
|
6
|
+
def commit_model(identifier):
|
7
|
+
return ModelCommitment(identifier=identifier,
|
8
|
+
activations_model=get_model(identifier),
|
9
|
+
layers=get_layers(identifier))
|
10
|
+
|
11
|
+
model_registry['yudixie_resnet18_category_class_0_240719'] = lambda: commit_model('yudixie_resnet18_category_class_0_240719')
|
@@ -0,0 +1,60 @@
|
|
1
|
+
import os
|
2
|
+
from pathlib import Path
|
3
|
+
import functools
|
4
|
+
from urllib.request import urlretrieve
|
5
|
+
|
6
|
+
import numpy as np
|
7
|
+
import torch
|
8
|
+
import torch.nn as nn
|
9
|
+
from torchvision.models import resnet18
|
10
|
+
|
11
|
+
from brainscore_vision.model_helpers.check_submission import check_models
|
12
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
13
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
|
14
|
+
|
15
|
+
|
16
|
+
# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
|
17
|
+
# If the model requires a GPU, contact the brain-score team directly.
|
18
|
+
|
19
|
+
|
20
|
+
def get_model(name):
|
21
|
+
pytorch_device = torch.device('cpu')
|
22
|
+
|
23
|
+
weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
|
24
|
+
fh = urlretrieve(weigth_url, f'{name}_weights.pth')
|
25
|
+
load_path = fh[0]
|
26
|
+
|
27
|
+
pytorch_model = resnet18()
|
28
|
+
pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
|
29
|
+
pytorch_model = pytorch_model.to(pytorch_device)
|
30
|
+
|
31
|
+
# load model from saved weights
|
32
|
+
saved_state_dict = torch.load(load_path, map_location=pytorch_device)
|
33
|
+
state_dict = {}
|
34
|
+
for k, v in saved_state_dict.items():
|
35
|
+
if k.startswith('_orig_mod.'):
|
36
|
+
# for compiled models
|
37
|
+
state_dict[k[10:]] = v
|
38
|
+
else:
|
39
|
+
state_dict[k] = v
|
40
|
+
pytorch_model.load_state_dict(state_dict, strict=True)
|
41
|
+
print(f'Loaded model from {load_path}')
|
42
|
+
|
43
|
+
preprocessing = functools.partial(load_preprocess_images, image_size=224)
|
44
|
+
wrapper = PytorchWrapper(identifier=name,
|
45
|
+
model=pytorch_model,
|
46
|
+
preprocessing=preprocessing)
|
47
|
+
wrapper.image_size = 224
|
48
|
+
return wrapper
|
49
|
+
|
50
|
+
|
51
|
+
def get_layers(name):
|
52
|
+
return ['conv1','layer1', 'layer2', 'layer3', 'layer4', 'fc']
|
53
|
+
|
54
|
+
|
55
|
+
def get_bibtex(model_identifier):
|
56
|
+
return """xx"""
|
57
|
+
|
58
|
+
|
59
|
+
if __name__ == '__main__':
|
60
|
+
check_models.check_base_models(__name__)
|
@@ -0,0 +1,25 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
|
4
|
+
from setuptools import setup, find_packages
|
5
|
+
|
6
|
+
requirements = [ "torchvision",
|
7
|
+
"torch"
|
8
|
+
]
|
9
|
+
|
10
|
+
setup(
|
11
|
+
packages=find_packages(exclude=['tests']),
|
12
|
+
include_package_data=True,
|
13
|
+
install_requires=requirements,
|
14
|
+
license="MIT license",
|
15
|
+
zip_safe=False,
|
16
|
+
keywords='brain-score template',
|
17
|
+
classifiers=[
|
18
|
+
'Development Status :: 2 - Pre-Alpha',
|
19
|
+
'Intended Audience :: Developers',
|
20
|
+
'License :: OSI Approved :: MIT License',
|
21
|
+
'Natural Language :: English',
|
22
|
+
'Programming Language :: Python :: 3.7',
|
23
|
+
],
|
24
|
+
test_suite='tests',
|
25
|
+
)
|
@@ -0,0 +1 @@
|
|
1
|
+
# Left empty as part of 2023 models migration
|
@@ -0,0 +1,11 @@
|
|
1
|
+
from brainscore_vision import model_registry
|
2
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
3
|
+
from .model import get_model, get_layers
|
4
|
+
|
5
|
+
|
6
|
+
def commit_model(identifier):
|
7
|
+
return ModelCommitment(identifier=identifier,
|
8
|
+
activations_model=get_model(identifier),
|
9
|
+
layers=get_layers(identifier))
|
10
|
+
|
11
|
+
model_registry['yudixie_resnet18_object_class_0_240719'] = lambda: commit_model('yudixie_resnet18_object_class_0_240719')
|
@@ -0,0 +1,60 @@
|
|
1
|
+
import os
|
2
|
+
from pathlib import Path
|
3
|
+
import functools
|
4
|
+
from urllib.request import urlretrieve
|
5
|
+
|
6
|
+
import numpy as np
|
7
|
+
import torch
|
8
|
+
import torch.nn as nn
|
9
|
+
from torchvision.models import resnet18
|
10
|
+
|
11
|
+
from brainscore_vision.model_helpers.check_submission import check_models
|
12
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
13
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
|
14
|
+
|
15
|
+
|
16
|
+
# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
|
17
|
+
# If the model requires a GPU, contact the brain-score team directly.
|
18
|
+
|
19
|
+
|
20
|
+
def get_model(name):
|
21
|
+
pytorch_device = torch.device('cpu')
|
22
|
+
|
23
|
+
weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
|
24
|
+
fh = urlretrieve(weigth_url, f'{name}_weights.pth')
|
25
|
+
load_path = fh[0]
|
26
|
+
|
27
|
+
pytorch_model = resnet18()
|
28
|
+
pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
|
29
|
+
pytorch_model = pytorch_model.to(pytorch_device)
|
30
|
+
|
31
|
+
# load model from saved weights
|
32
|
+
saved_state_dict = torch.load(load_path, map_location=pytorch_device)
|
33
|
+
state_dict = {}
|
34
|
+
for k, v in saved_state_dict.items():
|
35
|
+
if k.startswith('_orig_mod.'):
|
36
|
+
# for compiled models
|
37
|
+
state_dict[k[10:]] = v
|
38
|
+
else:
|
39
|
+
state_dict[k] = v
|
40
|
+
pytorch_model.load_state_dict(state_dict, strict=True)
|
41
|
+
print(f'Loaded model from {load_path}')
|
42
|
+
|
43
|
+
preprocessing = functools.partial(load_preprocess_images, image_size=224)
|
44
|
+
wrapper = PytorchWrapper(identifier=name,
|
45
|
+
model=pytorch_model,
|
46
|
+
preprocessing=preprocessing)
|
47
|
+
wrapper.image_size = 224
|
48
|
+
return wrapper
|
49
|
+
|
50
|
+
|
51
|
+
def get_layers(name):
|
52
|
+
return ['conv1','layer1', 'layer2', 'layer3', 'layer4', 'fc']
|
53
|
+
|
54
|
+
|
55
|
+
def get_bibtex(model_identifier):
|
56
|
+
return """xx"""
|
57
|
+
|
58
|
+
|
59
|
+
if __name__ == '__main__':
|
60
|
+
check_models.check_base_models(__name__)
|
@@ -0,0 +1,25 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
|
4
|
+
from setuptools import setup, find_packages
|
5
|
+
|
6
|
+
requirements = [ "torchvision",
|
7
|
+
"torch"
|
8
|
+
]
|
9
|
+
|
10
|
+
setup(
|
11
|
+
packages=find_packages(exclude=['tests']),
|
12
|
+
include_package_data=True,
|
13
|
+
install_requires=requirements,
|
14
|
+
license="MIT license",
|
15
|
+
zip_safe=False,
|
16
|
+
keywords='brain-score template',
|
17
|
+
classifiers=[
|
18
|
+
'Development Status :: 2 - Pre-Alpha',
|
19
|
+
'Intended Audience :: Developers',
|
20
|
+
'License :: OSI Approved :: MIT License',
|
21
|
+
'Natural Language :: English',
|
22
|
+
'Programming Language :: Python :: 3.7',
|
23
|
+
],
|
24
|
+
test_suite='tests',
|
25
|
+
)
|
@@ -0,0 +1 @@
|
|
1
|
+
# Left empty as part of 2023 models migration
|
@@ -0,0 +1,11 @@
|
|
1
|
+
from brainscore_vision import model_registry
|
2
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
3
|
+
from .model import get_model, get_layers
|
4
|
+
|
5
|
+
|
6
|
+
def commit_model(identifier):
    """Build a ModelCommitment for *identifier*.

    Pairs the activations model with its scored layer list so brain-score
    can map layers to brain regions.
    """
    activations = get_model(identifier)
    layers = get_layers(identifier)
    return ModelCommitment(identifier=identifier,
                           activations_model=activations,
                           layers=layers)
|
10
|
+
|
11
|
+
# Register under the brain-score identifier; the lambda defers construction
# (including the weight download inside get_model) until the model is looked up.
model_registry['yudixie_resnet18_cat_obj_class_all_latents_0_240719'] = lambda: commit_model('yudixie_resnet18_cat_obj_class_all_latents_0_240719')
|
@@ -0,0 +1,60 @@
|
|
1
|
+
import os
|
2
|
+
from pathlib import Path
|
3
|
+
import functools
|
4
|
+
from urllib.request import urlretrieve
|
5
|
+
|
6
|
+
import numpy as np
|
7
|
+
import torch
|
8
|
+
import torch.nn as nn
|
9
|
+
from torchvision.models import resnet18
|
10
|
+
|
11
|
+
from brainscore_vision.model_helpers.check_submission import check_models
|
12
|
+
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
|
13
|
+
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
|
14
|
+
|
15
|
+
|
16
|
+
# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
|
17
|
+
# If the model requires a GPU, contact the brain-score team directly.
|
18
|
+
|
19
|
+
|
20
|
+
def get_model(name):
    """Build a CPU-only PytorchWrapper around a ResNet-18 with downloaded weights.

    Fetches ``{name}.pth`` from the submitter's S3 bucket, loads it into a
    ResNet-18 whose final layer is resized to 674 classes (matching the
    checkpoint's training head), and wraps it with standard 224x224
    preprocessing for brain-score.

    :param name: model identifier; also names the remote weight file.
    :return: PytorchWrapper exposing the loaded model.
    """
    pytorch_device = torch.device('cpu')

    weight_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
    fh = urlretrieve(weight_url, f'{name}_weights.pth')
    load_path = fh[0]

    pytorch_model = resnet18()
    # Resize the classification head to the checkpoint's 674 output classes.
    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
    pytorch_model = pytorch_model.to(pytorch_device)

    # Load saved weights; checkpoints saved from torch.compile()'d models carry
    # an '_orig_mod.' prefix on every key, which must be stripped before loading.
    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
    compiled_prefix = '_orig_mod.'
    state_dict = {}
    for k, v in saved_state_dict.items():
        if k.startswith(compiled_prefix):
            # was a hard-coded k[10:]; derive the offset from the prefix itself
            state_dict[k[len(compiled_prefix):]] = v
        else:
            state_dict[k] = v
    pytorch_model.load_state_dict(state_dict, strict=True)
    print(f'Loaded model from {load_path}')

    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    wrapper = PytorchWrapper(identifier=name,
                             model=pytorch_model,
                             preprocessing=preprocessing)
    wrapper.image_size = 224
    return wrapper
|
49
|
+
|
50
|
+
|
51
|
+
def get_layers(name):
    """Return the ResNet-18 layer names whose activations are scored.

    :param name: model identifier (unused; all variants share one layer set).
    """
    residual_stages = [f'layer{i}' for i in range(1, 5)]
    return ['conv1'] + residual_stages + ['fc']
|
53
|
+
|
54
|
+
|
55
|
+
def get_bibtex(model_identifier):
    """Return the BibTeX entry for this model.

    :param model_identifier: model identifier (unused; single shared entry).
    """
    # TODO(review): placeholder citation left by the submitter; value kept as-is.
    return """xx"""
|
57
|
+
|
58
|
+
|
59
|
+
if __name__ == '__main__':
    # Validate this module's registered base models via the brain-score
    # submission checker when the file is executed directly.
    check_models.check_base_models(__name__)
|
@@ -0,0 +1,25 @@
|
|
1
|
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for a brain-score model plugin (template-derived)."""

from setuptools import setup, find_packages

# Runtime dependencies of the model plugin.
requirements = [
    "torchvision",
    "torch",
]

setup(
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='brain-score template',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    test_suite='tests',
)
|
@@ -0,0 +1 @@
|
|
1
|
+
# Left empty as part of 2023 models migration
|
@@ -0,0 +1 @@
|
|
1
|
+
{"V4": "layer2", "IT": "layer3", "V2": "layer2", "V1": "layer2"}
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: brainscore_vision
|
3
|
-
Version: 2.2.
|
3
|
+
Version: 2.2.3
|
4
4
|
Summary: The Brain-Score library enables model comparisons to behavioral and neural experiments
|
5
5
|
License:
|
6
6
|
MIT License
|
@@ -37,12 +37,13 @@ Requires-Dist: eva-decord
|
|
37
37
|
Requires-Dist: psutil
|
38
38
|
Provides-Extra: test
|
39
39
|
Requires-Dist: pytest; extra == "test"
|
40
|
-
Requires-Dist:
|
40
|
+
Requires-Dist: pytest_check; extra == "test"
|
41
41
|
Requires-Dist: pytest-mock; extra == "test"
|
42
42
|
Requires-Dist: pytest-timeout; extra == "test"
|
43
43
|
Requires-Dist: torch; extra == "test"
|
44
44
|
Requires-Dist: torchvision; extra == "test"
|
45
45
|
Requires-Dist: matplotlib; extra == "test"
|
46
|
+
Requires-Dist: pytest-mock; extra == "test"
|
46
47
|
|
47
48
|
[](https://app.travis-ci.com/brain-score/vision)
|
48
49
|
[](https://brain-score.readthedocs.io/en/latest/?badge=latest)
|