brainscore-vision 2.2.1__py3-none-any.whl → 2.2.3__py3-none-any.whl

Files changed (263)
  1. brainscore_vision/model_helpers/brain_transformation/__init__.py +1 -2
  2. brainscore_vision/models/alexnet_less_variation_1/__init__.py +6 -0
  3. brainscore_vision/models/alexnet_less_variation_1/model.py +200 -0
  4. brainscore_vision/models/alexnet_less_variation_1/region_layer_map/alexnet_less_variation_iteration=1.json +6 -0
  5. brainscore_vision/models/alexnet_less_variation_1/setup.py +29 -0
  6. brainscore_vision/models/alexnet_less_variation_1/test.py +3 -0
  7. brainscore_vision/models/alexnet_less_variation_2/__init__.py +6 -0
  8. brainscore_vision/models/alexnet_less_variation_2/model.py +200 -0
  9. brainscore_vision/models/alexnet_less_variation_2/region_layer_map/alexnet_less_variation_iteration=2.json +6 -0
  10. brainscore_vision/models/alexnet_less_variation_2/setup.py +29 -0
  11. brainscore_vision/models/alexnet_less_variation_2/test.py +3 -0
  12. brainscore_vision/models/alexnet_less_variation_4/__init__.py +6 -0
  13. brainscore_vision/models/alexnet_less_variation_4/model.py +200 -0
  14. brainscore_vision/models/alexnet_less_variation_4/region_layer_map/alexnet_less_variation_iteration=4.json +6 -0
  15. brainscore_vision/models/alexnet_less_variation_4/setup.py +29 -0
  16. brainscore_vision/models/alexnet_less_variation_4/test.py +3 -0
  17. brainscore_vision/models/alexnet_no_specular_2/__init__.py +6 -0
  18. brainscore_vision/models/alexnet_no_specular_2/model.py +200 -0
  19. brainscore_vision/models/alexnet_no_specular_2/region_layer_map/alexnet_no_specular_iteration=2.json +6 -0
  20. brainscore_vision/models/alexnet_no_specular_2/setup.py +29 -0
  21. brainscore_vision/models/alexnet_no_specular_2/test.py +3 -0
  22. brainscore_vision/models/alexnet_no_specular_4/__init__.py +6 -0
  23. brainscore_vision/models/alexnet_no_specular_4/model.py +200 -0
  24. brainscore_vision/models/alexnet_no_specular_4/region_layer_map/alexnet_no_specular_iteration=4.json +6 -0
  25. brainscore_vision/models/alexnet_no_specular_4/setup.py +29 -0
  26. brainscore_vision/models/alexnet_no_specular_4/test.py +3 -0
  27. brainscore_vision/models/alexnet_no_variation_4/__init__.py +6 -0
  28. brainscore_vision/models/alexnet_no_variation_4/model.py +200 -0
  29. brainscore_vision/models/alexnet_no_variation_4/region_layer_map/alexnet_no_variation_iteration=4.json +6 -0
  30. brainscore_vision/models/alexnet_no_variation_4/setup.py +29 -0
  31. brainscore_vision/models/alexnet_no_variation_4/test.py +3 -0
  32. brainscore_vision/models/alexnet_original_3/__init__.py +6 -0
  33. brainscore_vision/models/alexnet_original_3/model.py +200 -0
  34. brainscore_vision/models/alexnet_original_3/region_layer_map/alexnet_original_iteration=3.json +6 -0
  35. brainscore_vision/models/alexnet_original_3/setup.py +29 -0
  36. brainscore_vision/models/alexnet_original_3/test.py +3 -0
  37. brainscore_vision/models/alexnet_wo_shading_4/__init__.py +6 -0
  38. brainscore_vision/models/alexnet_wo_shading_4/model.py +200 -0
  39. brainscore_vision/models/alexnet_wo_shading_4/region_layer_map/alexnet_wo_shading_iteration=4.json +6 -0
  40. brainscore_vision/models/alexnet_wo_shading_4/setup.py +29 -0
  41. brainscore_vision/models/alexnet_wo_shading_4/test.py +3 -0
  42. brainscore_vision/models/alexnet_wo_shadows_5/__init__.py +6 -0
  43. brainscore_vision/models/alexnet_wo_shadows_5/model.py +200 -0
  44. brainscore_vision/models/alexnet_wo_shadows_5/region_layer_map/alexnet_wo_shadows_iteration=5.json +6 -0
  45. brainscore_vision/models/alexnet_wo_shadows_5/setup.py +29 -0
  46. brainscore_vision/models/alexnet_wo_shadows_5/test.py +3 -0
  47. brainscore_vision/models/alexnet_z_axis_1/__init__.py +6 -0
  48. brainscore_vision/models/alexnet_z_axis_1/model.py +200 -0
  49. brainscore_vision/models/alexnet_z_axis_1/region_layer_map/alexnet_z_axis_iteration=1.json +6 -0
  50. brainscore_vision/models/alexnet_z_axis_1/setup.py +29 -0
  51. brainscore_vision/models/alexnet_z_axis_1/test.py +3 -0
  52. brainscore_vision/models/alexnet_z_axis_2/__init__.py +6 -0
  53. brainscore_vision/models/alexnet_z_axis_2/model.py +200 -0
  54. brainscore_vision/models/alexnet_z_axis_2/region_layer_map/alexnet_z_axis_iteration=2.json +6 -0
  55. brainscore_vision/models/alexnet_z_axis_2/setup.py +29 -0
  56. brainscore_vision/models/alexnet_z_axis_2/test.py +3 -0
  57. brainscore_vision/models/alexnet_z_axis_3/__init__.py +6 -0
  58. brainscore_vision/models/alexnet_z_axis_3/model.py +200 -0
  59. brainscore_vision/models/alexnet_z_axis_3/region_layer_map/alexnet_z_axis_iteration=3.json +6 -0
  60. brainscore_vision/models/alexnet_z_axis_3/setup.py +29 -0
  61. brainscore_vision/models/alexnet_z_axis_3/test.py +3 -0
  62. brainscore_vision/models/alexnet_z_axis_4/__init__.py +6 -0
  63. brainscore_vision/models/alexnet_z_axis_4/model.py +200 -0
  64. brainscore_vision/models/alexnet_z_axis_4/region_layer_map/alexnet_z_axis_iteration=4.json +6 -0
  65. brainscore_vision/models/alexnet_z_axis_4/setup.py +29 -0
  66. brainscore_vision/models/alexnet_z_axis_4/test.py +3 -0
  67. brainscore_vision/models/artResNet18_1/__init__.py +5 -0
  68. brainscore_vision/models/artResNet18_1/model.py +66 -0
  69. brainscore_vision/models/artResNet18_1/requirements.txt +4 -0
  70. brainscore_vision/models/artResNet18_1/test.py +12 -0
  71. brainscore_vision/models/barlow_twins_custom/__init__.py +5 -0
  72. brainscore_vision/models/barlow_twins_custom/model.py +58 -0
  73. brainscore_vision/models/barlow_twins_custom/requirements.txt +4 -0
  74. brainscore_vision/models/barlow_twins_custom/test.py +12 -0
  75. brainscore_vision/models/blt-vs/__init__.py +15 -0
  76. brainscore_vision/models/blt-vs/model.py +962 -0
  77. brainscore_vision/models/blt-vs/pretrained.py +219 -0
  78. brainscore_vision/models/blt-vs/region_layer_map/blt_vs.json +6 -0
  79. brainscore_vision/models/blt-vs/setup.py +22 -0
  80. brainscore_vision/models/blt-vs/test.py +0 -0
  81. brainscore_vision/models/cifar_resnet18_1/__init__.py +5 -0
  82. brainscore_vision/models/cifar_resnet18_1/model.py +68 -0
  83. brainscore_vision/models/cifar_resnet18_1/requirements.txt +4 -0
  84. brainscore_vision/models/cifar_resnet18_1/test.py +10 -0
  85. brainscore_vision/models/resnet18_random/__init__.py +5 -0
  86. brainscore_vision/models/resnet18_random/archive_name.zip +0 -0
  87. brainscore_vision/models/resnet18_random/model.py +42 -0
  88. brainscore_vision/models/resnet18_random/requirements.txt +2 -0
  89. brainscore_vision/models/resnet18_random/test.py +12 -0
  90. brainscore_vision/models/resnet50_less_variation_1/__init__.py +6 -0
  91. brainscore_vision/models/resnet50_less_variation_1/model.py +200 -0
  92. brainscore_vision/models/resnet50_less_variation_1/region_layer_map/resnet50_less_variation_iteration=1.json +6 -0
  93. brainscore_vision/models/resnet50_less_variation_1/setup.py +29 -0
  94. brainscore_vision/models/resnet50_less_variation_1/test.py +3 -0
  95. brainscore_vision/models/resnet50_less_variation_2/__init__.py +6 -0
  96. brainscore_vision/models/resnet50_less_variation_2/model.py +200 -0
  97. brainscore_vision/models/resnet50_less_variation_2/region_layer_map/resnet50_less_variation_iteration=2.json +6 -0
  98. brainscore_vision/models/resnet50_less_variation_2/setup.py +29 -0
  99. brainscore_vision/models/resnet50_less_variation_2/test.py +3 -0
  100. brainscore_vision/models/resnet50_less_variation_3/__init__.py +6 -0
  101. brainscore_vision/models/resnet50_less_variation_3/model.py +200 -0
  102. brainscore_vision/models/resnet50_less_variation_3/region_layer_map/resnet50_less_variation_iteration=3.json +6 -0
  103. brainscore_vision/models/resnet50_less_variation_3/setup.py +29 -0
  104. brainscore_vision/models/resnet50_less_variation_3/test.py +3 -0
  105. brainscore_vision/models/resnet50_less_variation_4/__init__.py +6 -0
  106. brainscore_vision/models/resnet50_less_variation_4/model.py +200 -0
  107. brainscore_vision/models/resnet50_less_variation_4/region_layer_map/resnet50_less_variation_iteration=4.json +6 -0
  108. brainscore_vision/models/resnet50_less_variation_4/setup.py +29 -0
  109. brainscore_vision/models/resnet50_less_variation_4/test.py +3 -0
  110. brainscore_vision/models/resnet50_less_variation_5/__init__.py +6 -0
  111. brainscore_vision/models/resnet50_less_variation_5/model.py +200 -0
  112. brainscore_vision/models/resnet50_less_variation_5/region_layer_map/resnet50_less_variation_iteration=5.json +6 -0
  113. brainscore_vision/models/resnet50_less_variation_5/setup.py +29 -0
  114. brainscore_vision/models/resnet50_less_variation_5/test.py +3 -0
  115. brainscore_vision/models/resnet50_no_variation_1/__init__.py +6 -0
  116. brainscore_vision/models/resnet50_no_variation_1/model.py +200 -0
  117. brainscore_vision/models/resnet50_no_variation_1/region_layer_map/resnet50_no_variation_iteration=1.json +6 -0
  118. brainscore_vision/models/resnet50_no_variation_1/setup.py +29 -0
  119. brainscore_vision/models/resnet50_no_variation_1/test.py +3 -0
  120. brainscore_vision/models/resnet50_no_variation_2/__init__.py +6 -0
  121. brainscore_vision/models/resnet50_no_variation_2/model.py +200 -0
  122. brainscore_vision/models/resnet50_no_variation_2/region_layer_map/resnet50_no_variation_iteration=2.json +6 -0
  123. brainscore_vision/models/resnet50_no_variation_2/setup.py +29 -0
  124. brainscore_vision/models/resnet50_no_variation_2/test.py +3 -0
  125. brainscore_vision/models/resnet50_no_variation_5/__init__.py +6 -0
  126. brainscore_vision/models/resnet50_no_variation_5/model.py +200 -0
  127. brainscore_vision/models/resnet50_no_variation_5/region_layer_map/resnet50_no_variation_iteration=5.json +6 -0
  128. brainscore_vision/models/resnet50_no_variation_5/setup.py +29 -0
  129. brainscore_vision/models/resnet50_no_variation_5/test.py +3 -0
  130. brainscore_vision/models/resnet50_original_1/__init__.py +6 -0
  131. brainscore_vision/models/resnet50_original_1/model.py +200 -0
  132. brainscore_vision/models/resnet50_original_1/region_layer_map/resnet50_original_iteration=1.json +6 -0
  133. brainscore_vision/models/resnet50_original_1/setup.py +29 -0
  134. brainscore_vision/models/resnet50_original_1/test.py +3 -0
  135. brainscore_vision/models/resnet50_original_2/__init__.py +6 -0
  136. brainscore_vision/models/resnet50_original_2/model.py +200 -0
  137. brainscore_vision/models/resnet50_original_2/region_layer_map/resnet50_original_iteration=2.json +6 -0
  138. brainscore_vision/models/resnet50_original_2/setup.py +29 -0
  139. brainscore_vision/models/resnet50_original_2/test.py +3 -0
  140. brainscore_vision/models/resnet50_original_5/__init__.py +6 -0
  141. brainscore_vision/models/resnet50_original_5/model.py +200 -0
  142. brainscore_vision/models/resnet50_original_5/region_layer_map/resnet50_original_iteration=5.json +6 -0
  143. brainscore_vision/models/resnet50_original_5/setup.py +29 -0
  144. brainscore_vision/models/resnet50_original_5/test.py +3 -0
  145. brainscore_vision/models/resnet50_textures_1/__init__.py +6 -0
  146. brainscore_vision/models/resnet50_textures_1/model.py +200 -0
  147. brainscore_vision/models/resnet50_textures_1/region_layer_map/resnet50_textures_iteration=1.json +6 -0
  148. brainscore_vision/models/resnet50_textures_1/setup.py +29 -0
  149. brainscore_vision/models/resnet50_textures_1/test.py +3 -0
  150. brainscore_vision/models/resnet50_textures_2/__init__.py +6 -0
  151. brainscore_vision/models/resnet50_textures_2/model.py +200 -0
  152. brainscore_vision/models/resnet50_textures_2/region_layer_map/resnet50_textures_iteration=2.json +6 -0
  153. brainscore_vision/models/resnet50_textures_2/setup.py +29 -0
  154. brainscore_vision/models/resnet50_textures_2/test.py +3 -0
  155. brainscore_vision/models/resnet50_textures_3/__init__.py +6 -0
  156. brainscore_vision/models/resnet50_textures_3/model.py +200 -0
  157. brainscore_vision/models/resnet50_textures_3/region_layer_map/resnet50_textures_iteration=3.json +6 -0
  158. brainscore_vision/models/resnet50_textures_3/setup.py +29 -0
  159. brainscore_vision/models/resnet50_textures_3/test.py +3 -0
  160. brainscore_vision/models/resnet50_textures_4/__init__.py +6 -0
  161. brainscore_vision/models/resnet50_textures_4/model.py +200 -0
  162. brainscore_vision/models/resnet50_textures_4/region_layer_map/resnet50_textures_iteration=4.json +6 -0
  163. brainscore_vision/models/resnet50_textures_4/setup.py +29 -0
  164. brainscore_vision/models/resnet50_textures_4/test.py +3 -0
  165. brainscore_vision/models/resnet50_textures_5/__init__.py +6 -0
  166. brainscore_vision/models/resnet50_textures_5/model.py +200 -0
  167. brainscore_vision/models/resnet50_textures_5/region_layer_map/resnet50_textures_iteration=5.json +6 -0
  168. brainscore_vision/models/resnet50_textures_5/setup.py +29 -0
  169. brainscore_vision/models/resnet50_textures_5/test.py +3 -0
  170. brainscore_vision/models/resnet50_wo_shading_1/__init__.py +6 -0
  171. brainscore_vision/models/resnet50_wo_shading_1/model.py +200 -0
  172. brainscore_vision/models/resnet50_wo_shading_1/region_layer_map/resnet50_wo_shading_iteration=1.json +6 -0
  173. brainscore_vision/models/resnet50_wo_shading_1/setup.py +29 -0
  174. brainscore_vision/models/resnet50_wo_shading_1/test.py +3 -0
  175. brainscore_vision/models/resnet50_wo_shading_3/__init__.py +6 -0
  176. brainscore_vision/models/resnet50_wo_shading_3/model.py +200 -0
  177. brainscore_vision/models/resnet50_wo_shading_3/region_layer_map/resnet50_wo_shading_iteration=3.json +6 -0
  178. brainscore_vision/models/resnet50_wo_shading_3/setup.py +29 -0
  179. brainscore_vision/models/resnet50_wo_shading_3/test.py +3 -0
  180. brainscore_vision/models/resnet50_wo_shading_4/__init__.py +6 -0
  181. brainscore_vision/models/resnet50_wo_shading_4/model.py +200 -0
  182. brainscore_vision/models/resnet50_wo_shading_4/region_layer_map/resnet50_wo_shading_iteration=4.json +6 -0
  183. brainscore_vision/models/resnet50_wo_shading_4/setup.py +29 -0
  184. brainscore_vision/models/resnet50_wo_shading_4/test.py +3 -0
  185. brainscore_vision/models/resnet50_wo_shadows_4/__init__.py +6 -0
  186. brainscore_vision/models/resnet50_wo_shadows_4/model.py +200 -0
  187. brainscore_vision/models/resnet50_wo_shadows_4/region_layer_map/resnet50_wo_shadows_iteration=4.json +6 -0
  188. brainscore_vision/models/resnet50_wo_shadows_4/setup.py +29 -0
  189. brainscore_vision/models/resnet50_wo_shadows_4/test.py +3 -0
  190. brainscore_vision/models/resnet50_z_axis_1/__init__.py +6 -0
  191. brainscore_vision/models/resnet50_z_axis_1/model.py +200 -0
  192. brainscore_vision/models/resnet50_z_axis_1/region_layer_map/resnet50_z_axis_iteration=1.json +6 -0
  193. brainscore_vision/models/resnet50_z_axis_1/setup.py +29 -0
  194. brainscore_vision/models/resnet50_z_axis_1/test.py +3 -0
  195. brainscore_vision/models/resnet50_z_axis_2/__init__.py +6 -0
  196. brainscore_vision/models/resnet50_z_axis_2/model.py +200 -0
  197. brainscore_vision/models/resnet50_z_axis_2/region_layer_map/resnet50_z_axis_iteration=2.json +6 -0
  198. brainscore_vision/models/resnet50_z_axis_2/setup.py +29 -0
  199. brainscore_vision/models/resnet50_z_axis_2/test.py +3 -0
  200. brainscore_vision/models/resnet50_z_axis_3/__init__.py +6 -0
  201. brainscore_vision/models/resnet50_z_axis_3/model.py +200 -0
  202. brainscore_vision/models/resnet50_z_axis_3/region_layer_map/resnet50_z_axis_iteration=3.json +6 -0
  203. brainscore_vision/models/resnet50_z_axis_3/setup.py +29 -0
  204. brainscore_vision/models/resnet50_z_axis_3/test.py +3 -0
  205. brainscore_vision/models/resnet50_z_axis_5/__init__.py +6 -0
  206. brainscore_vision/models/resnet50_z_axis_5/model.py +200 -0
  207. brainscore_vision/models/resnet50_z_axis_5/region_layer_map/resnet50_z_axis_iteration=5.json +6 -0
  208. brainscore_vision/models/resnet50_z_axis_5/setup.py +29 -0
  209. brainscore_vision/models/resnet50_z_axis_5/test.py +3 -0
  210. brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json +1 -0
  211. brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json +1 -0
  212. brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json +1 -0
  213. brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py +11 -0
  214. brainscore_vision/models/yudixie_resnet18_240719_11/model.py +60 -0
  215. brainscore_vision/models/yudixie_resnet18_240719_11/region_layer_map/yudixie_resnet18_random_0_240719.json +6 -0
  216. brainscore_vision/models/yudixie_resnet18_240719_11/setup.py +25 -0
  217. brainscore_vision/models/yudixie_resnet18_240719_11/test.py +1 -0
  218. brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json +1 -0
  219. brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py +11 -0
  220. brainscore_vision/models/yudixie_resnet18_240719_3/model.py +60 -0
  221. brainscore_vision/models/yudixie_resnet18_240719_3/region_layer_map/yudixie_resnet18_distance_translation_0_240719.json +6 -0
  222. brainscore_vision/models/yudixie_resnet18_240719_3/setup.py +25 -0
  223. brainscore_vision/models/yudixie_resnet18_240719_3/test.py +1 -0
  224. brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py +12 -0
  225. brainscore_vision/models/yudixie_resnet18_240719_4/model.py +60 -0
  226. brainscore_vision/models/yudixie_resnet18_240719_4/region_layer_map/yudixie_resnet18_distance_rotation_0_240719.json +6 -0
  227. brainscore_vision/models/yudixie_resnet18_240719_4/setup.py +25 -0
  228. brainscore_vision/models/yudixie_resnet18_240719_4/test.py +1 -0
  229. brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py +13 -0
  230. brainscore_vision/models/yudixie_resnet18_240719_5/model.py +60 -0
  231. brainscore_vision/models/yudixie_resnet18_240719_5/region_layer_map/yudixie_resnet18_translation_rotation_0_240719.json +6 -0
  232. brainscore_vision/models/yudixie_resnet18_240719_5/setup.py +25 -0
  233. brainscore_vision/models/yudixie_resnet18_240719_5/test.py +1 -0
  234. brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py +12 -0
  235. brainscore_vision/models/yudixie_resnet18_240719_6/model.py +60 -0
  236. brainscore_vision/models/yudixie_resnet18_240719_6/region_layer_map/yudixie_resnet18_distance_translation_rotation_0_240719.json +6 -0
  237. brainscore_vision/models/yudixie_resnet18_240719_6/setup.py +25 -0
  238. brainscore_vision/models/yudixie_resnet18_240719_6/test.py +1 -0
  239. brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py +11 -0
  240. brainscore_vision/models/yudixie_resnet18_240719_7/model.py +60 -0
  241. brainscore_vision/models/yudixie_resnet18_240719_7/region_layer_map/yudixie_resnet18_category_class_0_240719.json +6 -0
  242. brainscore_vision/models/yudixie_resnet18_240719_7/setup.py +25 -0
  243. brainscore_vision/models/yudixie_resnet18_240719_7/test.py +1 -0
  244. brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py +11 -0
  245. brainscore_vision/models/yudixie_resnet18_240719_8/model.py +60 -0
  246. brainscore_vision/models/yudixie_resnet18_240719_8/region_layer_map/yudixie_resnet18_object_class_0_240719.json +6 -0
  247. brainscore_vision/models/yudixie_resnet18_240719_8/setup.py +25 -0
  248. brainscore_vision/models/yudixie_resnet18_240719_8/test.py +1 -0
  249. brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py +11 -0
  250. brainscore_vision/models/yudixie_resnet18_240719_9/model.py +60 -0
  251. brainscore_vision/models/yudixie_resnet18_240719_9/region_layer_map/yudixie_resnet18_cat_obj_class_all_latents_0_240719.json +6 -0
  252. brainscore_vision/models/yudixie_resnet18_240719_9/setup.py +25 -0
  253. brainscore_vision/models/yudixie_resnet18_240719_9/test.py +1 -0
  254. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json +1 -0
  255. {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/METADATA +3 -2
  256. {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/RECORD +263 -10
  257. {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/WHEEL +1 -1
  258. /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/__init__.py +0 -0
  259. /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/model.py +0 -0
  260. /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/requirements.txt +0 -0
  261. /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/test.py +0 -0
  262. {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/LICENSE +0 -0
  263. {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,200 @@
+
+ from brainscore_vision.model_helpers.check_submission import check_models
+ import functools
+ import numpy as np
+ import torch
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from PIL import Image
+ from torch import nn
+ import pytorch_lightning as pl
+ import torchvision.models as models
+ import gdown
+ import glob
+ import os
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+
+ def get_bibtex(model_identifier):
+     return 'VGG16'
+
+ def get_model_list():
+     return ['resnet50_less_variation_iteration=4']
+
+ def get_model(name):
+     keyword = 'less_variation'
+     iteration = 4
+     network = 'resnet50'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_4.ckpt'
+     output = 'resnet50_less_variation_iteration=4.ckpt'
+     gdown.download(url, output)
+
+
+     if keyword != 'imagenet_trained' and keyword != 'no_training':
+         lx_whole = [f"resnet50_less_variation_iteration=4.ckpt"]
+         if len(lx_whole) > 1:
+             lx_whole = [lx_whole[-1]]
+     elif keyword == 'imagenet_trained' or keyword == 'no_training':
+         print('keyword is imagenet')
+         lx_whole = ['x']
+
+     for model_ckpt in lx_whole:
+         print(model_ckpt)
+         last_module_name = None
+         last_module = None
+         layers = []
+         if keyword == 'imagenet_trained' and network != 'clip':
+             model = torch.hub.load('pytorch/vision', network, pretrained=True)
+             for name, module in model.named_modules():
+                 last_module_name = name
+                 last_module = module
+                 layers.append(name)
+         else:
+             model = torch.hub.load('pytorch/vision', network, pretrained=False)
+             if model_ckpt != 'x':
+                 ckpt = torch.load(model_ckpt, map_location='cpu')
+             if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             # Add more cases for other networks as needed
+     assert name == 'resnet50_less_variation_iteration=4'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_4.ckpt'
+     output = 'resnet50_less_variation_iteration=4.ckpt'
+     gdown.download(url, output)
+     layers = []
+     for name, module in model._modules.items():
+         print(name, "->", module)
+         layers.append(name)
+
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     activations_model = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+
+     return activations_model
+
+ def get_layers(name):
+     keyword = 'less_variation'
+     iteration = 4
+     network = 'resnet50'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_4.ckpt'
+     output = 'resnet50_less_variation_iteration=4.ckpt'
+     gdown.download(url, output)
+
+
+     if keyword != 'imagenet_trained' and keyword != 'no_training':
+         lx_whole = [f"resnet50_less_variation_iteration=4.ckpt"]
+         if len(lx_whole) > 1:
+             lx_whole = [lx_whole[-1]]
+     elif keyword == 'imagenet_trained' or keyword == 'no_training':
+         print('keyword is imagenet')
+         lx_whole = ['x']
+
+
+     for model_ckpt in lx_whole:
+         print(model_ckpt)
+         last_module_name = None
+         last_module = None
+         if keyword == 'imagenet_trained' and network != 'clip':
+             model = torch.hub.load('pytorch/vision', network, pretrained=True)
+             for name, module in model.named_modules():
+                 last_module_name = name
+                 last_module = module
+                 layers.append(name)
+         else:
+             model = torch.hub.load('pytorch/vision', network, pretrained=False)
+             if model_ckpt != 'x':
+                 ckpt = torch.load(model_ckpt, map_location='cpu')
+             if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             # Add more cases for other networks as needed
+     layers = []
+     for name, module in model._modules.items():
+         print(name, "->", module)
+         layers.append(name)
+     return layers
+
+ if __name__ == '__main__':
+     device = "cpu"
+     global model
+     global keyword
+     global network
+     global iteration
+     keyword = 'less_variation'
+     iteration = 4
+     network = 'resnet50'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_4.ckpt'
+     output = 'resnet50_less_variation_iteration=4.ckpt'
+     gdown.download(url, output)
+
+
+     if keyword != 'imagenet_trained' and keyword != 'no_training':
+         lx_whole = [f"resnet50_less_variation_iteration=4.ckpt"]
+         if len(lx_whole) > 1:
+             lx_whole = [lx_whole[-1]]
+     elif keyword == 'imagenet_trained' or keyword == 'no_training':
+         print('keyword is imagenet')
+         lx_whole = ['x']
+
+     for model_ckpt in lx_whole:
+         print(model_ckpt)
+         last_module_name = None
+         last_module = None
+         layers = []
+         if keyword == 'imagenet_trained' and network != 'clip':
+             model = torch.hub.load('pytorch/vision', network, pretrained=True)
+             for name, module in model.named_modules():
+                 last_module_name = name
+                 last_module = module
+                 layers.append(name)
+         else:
+             model = torch.hub.load('pytorch/vision', network, pretrained=False)
+             if model_ckpt != 'x':
+                 ckpt = torch.load(model_ckpt, map_location='cpu')
+             if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             # Add more cases for other networks as needed
+     check_models.check_base_models(__name__)
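Note: every model.py added in this release repeats the checkpoint-handling pattern seen above: download a PyTorch Lightning checkpoint with gdown and, for the architectures handled explicitly (the alexnet and vgg16 branches), strip the 'model.' prefix that Lightning prepends to each state_dict key before loading the weights into a plain torchvision network. A minimal sketch of that remapping idiom, assuming a checkpoint whose 'state_dict' keys all carry the 'model.' prefix (the helper name below is illustrative, not part of the package):

    import torch

    def load_lightning_weights(ckpt_path, network='resnet50'):
        # Build the bare torchvision architecture without pretrained weights.
        model = torch.hub.load('pytorch/vision', network, pretrained=False)
        # Lightning saves weights under 'state_dict' with keys like 'model.conv1.weight'.
        ckpt = torch.load(ckpt_path, map_location='cpu')
        state_dict = {key.split('model.')[1]: value
                      for key, value in ckpt['state_dict'].items()}
        model.load_state_dict(state_dict)
        return model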
@@ -0,0 +1,6 @@
+ {
+     "V1": "maxpool",
+     "V2": "layer1",
+     "V4": "maxpool",
+     "IT": "layer3"
+ }
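Each region_layer_map JSON, like the one above, pins the recorded visual regions (V1, V2, V4, IT) to a specific layer of the network; brainscore_vision's ModelCommitment can then reuse this fixed region-to-layer assignment instead of re-running layer selection. A small sketch of reading such a map (the file path is illustrative):

    import json

    # Illustrative path; each new model directory ships one such file.
    with open('region_layer_map/resnet50_less_variation_iteration=4.json') as f:
        region_layer_map = json.load(f)

    for region, layer in region_layer_map.items():
        print(region, '->', layer)   # e.g. V1 -> maxpool, IT -> layer3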
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ from setuptools import setup, find_packages
+
+ requirements = [
+     "torchvision",
+     "torch",
+     "gdown",
+     "pytorch_lightning",
+     "brainscore_vision"
+ ]
+
+ setup(
+     packages=find_packages(exclude=['tests']),
+     include_package_data=True,
+     install_requires=requirements,
+     license="MIT license",
+     zip_safe=False,
+     keywords='brain-score template',
+     classifiers=[
+         'Development Status :: 2 - Pre-Alpha',
+         'Intended Audience :: Developers',
+         'License :: OSI Approved :: MIT License',
+         'Natural Language :: English',
+         'Programming Language :: Python :: 3.7',
+     ],
+     test_suite='tests',
+ )
@@ -0,0 +1,3 @@
+
+ import pytest
+
@@ -0,0 +1,6 @@
+
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['resnet50_less_variation_iteration=5'] = lambda: ModelCommitment(identifier='resnet50_less_variation_iteration=5', activations_model=get_model('resnet50_less_variation_iteration=5'), layers=get_layers('resnet50_less_variation_iteration=5'))
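The __init__.py above registers the model lazily: model_registry maps the identifier to a lambda that only builds the ModelCommitment (and so only downloads the checkpoint) when the entry is requested. Assuming the standard brainscore_vision entry points, a registered model can then be loaded and scored by identifier; the benchmark identifier below is only an example:

    import brainscore_vision

    # Resolves the registry entry and constructs the ModelCommitment on demand.
    model = brainscore_vision.load_model('resnet50_less_variation_iteration=5')

    # Score the committed model on a benchmark (example identifier).
    result = brainscore_vision.score(model_identifier='resnet50_less_variation_iteration=5',
                                     benchmark_identifier='MajajHong2015public.IT-pls')
    print(result)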
@@ -0,0 +1,200 @@
+
+ from brainscore_vision.model_helpers.check_submission import check_models
+ import functools
+ import numpy as np
+ import torch
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from PIL import Image
+ from torch import nn
+ import pytorch_lightning as pl
+ import torchvision.models as models
+ import gdown
+ import glob
+ import os
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+
+ def get_bibtex(model_identifier):
+     return 'VGG16'
+
+ def get_model_list():
+     return ['resnet50_less_variation_iteration=5']
+
+ def get_model(name):
+     keyword = 'less_variation'
+     iteration = 5
+     network = 'resnet50'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_5.ckpt'
+     output = 'resnet50_less_variation_iteration=5.ckpt'
+     gdown.download(url, output)
+
+
+     if keyword != 'imagenet_trained' and keyword != 'no_training':
+         lx_whole = [f"resnet50_less_variation_iteration=5.ckpt"]
+         if len(lx_whole) > 1:
+             lx_whole = [lx_whole[-1]]
+     elif keyword == 'imagenet_trained' or keyword == 'no_training':
+         print('keyword is imagenet')
+         lx_whole = ['x']
+
+     for model_ckpt in lx_whole:
+         print(model_ckpt)
+         last_module_name = None
+         last_module = None
+         layers = []
+         if keyword == 'imagenet_trained' and network != 'clip':
+             model = torch.hub.load('pytorch/vision', network, pretrained=True)
+             for name, module in model.named_modules():
+                 last_module_name = name
+                 last_module = module
+                 layers.append(name)
+         else:
+             model = torch.hub.load('pytorch/vision', network, pretrained=False)
+             if model_ckpt != 'x':
+                 ckpt = torch.load(model_ckpt, map_location='cpu')
+             if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             # Add more cases for other networks as needed
+     assert name == 'resnet50_less_variation_iteration=5'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_5.ckpt'
+     output = 'resnet50_less_variation_iteration=5.ckpt'
+     gdown.download(url, output)
+     layers = []
+     for name, module in model._modules.items():
+         print(name, "->", module)
+         layers.append(name)
+
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     activations_model = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+
+     return activations_model
+
+ def get_layers(name):
+     keyword = 'less_variation'
+     iteration = 5
+     network = 'resnet50'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_5.ckpt'
+     output = 'resnet50_less_variation_iteration=5.ckpt'
+     gdown.download(url, output)
+
+
+     if keyword != 'imagenet_trained' and keyword != 'no_training':
+         lx_whole = [f"resnet50_less_variation_iteration=5.ckpt"]
+         if len(lx_whole) > 1:
+             lx_whole = [lx_whole[-1]]
+     elif keyword == 'imagenet_trained' or keyword == 'no_training':
+         print('keyword is imagenet')
+         lx_whole = ['x']
+
+
+     for model_ckpt in lx_whole:
+         print(model_ckpt)
+         last_module_name = None
+         last_module = None
+         if keyword == 'imagenet_trained' and network != 'clip':
+             model = torch.hub.load('pytorch/vision', network, pretrained=True)
+             for name, module in model.named_modules():
+                 last_module_name = name
+                 last_module = module
+                 layers.append(name)
+         else:
+             model = torch.hub.load('pytorch/vision', network, pretrained=False)
+             if model_ckpt != 'x':
+                 ckpt = torch.load(model_ckpt, map_location='cpu')
+             if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             # Add more cases for other networks as needed
+     layers = []
+     for name, module in model._modules.items():
+         print(name, "->", module)
+         layers.append(name)
+     return layers
+
+ if __name__ == '__main__':
+     device = "cpu"
+     global model
+     global keyword
+     global network
+     global iteration
+     keyword = 'less_variation'
+     iteration = 5
+     network = 'resnet50'
+     url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_5.ckpt'
+     output = 'resnet50_less_variation_iteration=5.ckpt'
+     gdown.download(url, output)
+
+
+     if keyword != 'imagenet_trained' and keyword != 'no_training':
+         lx_whole = [f"resnet50_less_variation_iteration=5.ckpt"]
+         if len(lx_whole) > 1:
+             lx_whole = [lx_whole[-1]]
+     elif keyword == 'imagenet_trained' or keyword == 'no_training':
+         print('keyword is imagenet')
+         lx_whole = ['x']
+
+     for model_ckpt in lx_whole:
+         print(model_ckpt)
+         last_module_name = None
+         last_module = None
+         layers = []
+         if keyword == 'imagenet_trained' and network != 'clip':
+             model = torch.hub.load('pytorch/vision', network, pretrained=True)
+             for name, module in model.named_modules():
+                 last_module_name = name
+                 last_module = module
+                 layers.append(name)
+         else:
+             model = torch.hub.load('pytorch/vision', network, pretrained=False)
+             if model_ckpt != 'x':
+                 ckpt = torch.load(model_ckpt, map_location='cpu')
+             if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
+                 ckpt2 = {}
+                 for keys in ckpt['state_dict']:
+                     print(keys)
+                     print(ckpt['state_dict'][keys].shape)
+                     print('---')
+                     k2 = keys.split('model.')[1]
+                     ckpt2[k2] = ckpt['state_dict'][keys]
+                 model.load_state_dict(ckpt2)
+             # Add more cases for other networks as needed
+     check_models.check_base_models(__name__)
@@ -0,0 +1,6 @@
+ {
+     "V1": "maxpool",
+     "V2": "layer1",
+     "V4": "maxpool",
+     "IT": "maxpool"
+ }
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ from setuptools import setup, find_packages
+
+ requirements = [
+     "torchvision",
+     "torch",
+     "gdown",
+     "pytorch_lightning",
+     "brainscore_vision"
+ ]
+
+ setup(
+     packages=find_packages(exclude=['tests']),
+     include_package_data=True,
+     install_requires=requirements,
+     license="MIT license",
+     zip_safe=False,
+     keywords='brain-score template',
+     classifiers=[
+         'Development Status :: 2 - Pre-Alpha',
+         'Intended Audience :: Developers',
+         'License :: OSI Approved :: MIT License',
+         'Natural Language :: English',
+         'Programming Language :: Python :: 3.7',
+     ],
+     test_suite='tests',
+ )
@@ -0,0 +1,3 @@
+
+ import pytest
+
@@ -0,0 +1,6 @@
+
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['resnet50_no_variation_iteration=1'] = lambda: ModelCommitment(identifier='resnet50_no_variation_iteration=1', activations_model=get_model('resnet50_no_variation_iteration=1'), layers=get_layers('resnet50_no_variation_iteration=1'))