brainscore-vision 2.2.4__py3-none-any.whl → 2.2.6__py3-none-any.whl

Files changed (722)
  1. brainscore_vision/data/baker2022/__init__.py +10 -10
  2. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
  3. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
  4. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
  5. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
  6. brainscore_vision/data/barbumayo2019/__init__.py +3 -3
  7. brainscore_vision/data/bashivankar2019/__init__.py +10 -10
  8. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
  9. brainscore_vision/data/bmd2024/__init__.py +20 -20
  10. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
  11. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
  12. brainscore_vision/data/bracci2019/__init__.py +5 -5
  13. brainscore_vision/data/bracci2019/data_packaging.py +1 -1
  14. brainscore_vision/data/cadena2017/__init__.py +5 -5
  15. brainscore_vision/data/cichy2019/__init__.py +5 -5
  16. brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
  17. brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
  18. brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
  19. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
  20. brainscore_vision/data/david2004/__init__.py +5 -5
  21. brainscore_vision/data/deng2009/__init__.py +3 -3
  22. brainscore_vision/data/ferguson2024/__init__.py +112 -112
  23. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
  24. brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
  25. brainscore_vision/data/geirhos2021/__init__.py +85 -85
  26. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
  27. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
  28. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
  29. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
  30. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
  31. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
  32. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
  33. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
  34. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
  35. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
  36. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
  37. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
  38. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
  39. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
  40. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
  41. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
  42. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
  43. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
  44. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
  45. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
  46. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
  47. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
  48. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
  49. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
  50. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
  51. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
  52. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
  53. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
  54. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
  55. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
  56. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
  57. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
  58. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
  59. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
  60. brainscore_vision/data/hebart2023/__init__.py +5 -5
  61. brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
  62. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
  63. brainscore_vision/data/hendrycks2019/__init__.py +12 -12
  64. brainscore_vision/data/igustibagus2024/__init__.py +5 -5
  65. brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
  66. brainscore_vision/data/islam2021/__init__.py +3 -3
  67. brainscore_vision/data/kar2018/__init__.py +7 -7
  68. brainscore_vision/data/kar2019/__init__.py +5 -5
  69. brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
  70. brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
  71. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
  72. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
  73. brainscore_vision/data/majajhong2015/__init__.py +23 -23
  74. brainscore_vision/data/malania2007/__init__.py +77 -77
  75. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
  76. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
  77. brainscore_vision/data/maniquet2024/__init__.py +11 -11
  78. brainscore_vision/data/marques2020/__init__.py +30 -30
  79. brainscore_vision/data/rajalingham2018/__init__.py +10 -10
  80. brainscore_vision/data/rajalingham2020/__init__.py +5 -5
  81. brainscore_vision/data/rust2012/__init__.py +7 -7
  82. brainscore_vision/data/sanghavi2020/__init__.py +19 -19
  83. brainscore_vision/data/scialom2024/__init__.py +110 -110
  84. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
  85. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
  86. brainscore_vision/data/seibert2019/__init__.py +2 -2
  87. brainscore_vision/data/zhang2018/__init__.py +5 -5
  88. brainscore_vision/data_helpers/s3.py +25 -6
  89. brainscore_vision/model_helpers/activations/pytorch.py +34 -12
  90. brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
  91. brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
  92. brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
  93. brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
  94. brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
  95. brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
  96. brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
  97. brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
  98. brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
  99. brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
  100. brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
  101. brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
  102. brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
  103. brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
  104. brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
  105. brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
  106. brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
  107. brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
  108. brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
  109. brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
  110. brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
  111. brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
  112. brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
  113. brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
  114. brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
  115. brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
  116. brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
  117. brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
  118. brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
  119. brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
  120. brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
  121. brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
  122. brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
  123. brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
  124. brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
  125. brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
  126. brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
  127. brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
  128. brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
  129. brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
  130. brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
  131. brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
  132. brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
  133. brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
  134. brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
  135. brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
  136. brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
  137. brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
  138. brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
  139. brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
  140. brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
  141. brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
  142. brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
  143. brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
  144. brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
  145. brainscore_vision/models/ReAlnet/__init__.py +64 -0
  146. brainscore_vision/models/ReAlnet/model.py +237 -0
  147. brainscore_vision/models/ReAlnet/requirements.txt +7 -0
  148. brainscore_vision/models/ReAlnet/test.py +0 -0
  149. brainscore_vision/models/ReAlnet/weights.json +26 -0
  150. brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
  151. brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
  152. brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
  153. brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
  154. brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
  155. brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
  156. brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
  157. brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
  158. brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
  159. brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
  160. brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
  161. brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
  162. brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
  163. brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
  164. brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
  165. brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
  166. brainscore_vision/models/VOneCORnet_S/model.py +25 -0
  167. brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
  168. brainscore_vision/models/VOneCORnet_S/test.py +8 -0
  169. brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
  170. brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
  171. brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
  172. brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
  173. brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
  174. brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
  175. brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
  176. brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
  177. brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
  178. brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
  179. brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
  180. brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
  181. brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
  182. brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
  183. brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
  184. brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
  185. brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
  186. brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
  187. brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
  188. brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
  189. brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
  190. brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
  191. brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
  192. brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
  193. brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
  194. brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
  195. brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
  196. brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
  197. brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
  198. brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
  199. brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
  200. brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
  201. brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
  202. brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
  203. brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
  204. brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
  205. brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
  206. brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
  207. brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
  208. brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
  209. brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
  210. brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
  211. brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
  212. brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
  213. brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
  214. brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
  215. brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
  216. brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
  217. brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
  218. brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
  219. brainscore_vision/models/antialiased-r50/__init__.py +7 -0
  220. brainscore_vision/models/antialiased-r50/model.py +62 -0
  221. brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
  222. brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
  223. brainscore_vision/models/antialiased-r50/test.py +8 -0
  224. brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
  225. brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
  226. brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
  227. brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
  228. brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
  229. brainscore_vision/models/cornet_s/model.py +2 -2
  230. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
  231. brainscore_vision/models/densenet_121/__init__.py +7 -0
  232. brainscore_vision/models/densenet_121/model.py +63 -0
  233. brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
  234. brainscore_vision/models/densenet_121/requirements.txt +1 -0
  235. brainscore_vision/models/densenet_121/test.py +8 -0
  236. brainscore_vision/models/densenet_169/__init__.py +7 -0
  237. brainscore_vision/models/densenet_169/model.py +63 -0
  238. brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
  239. brainscore_vision/models/densenet_169/requirements.txt +1 -0
  240. brainscore_vision/models/densenet_169/test.py +9 -0
  241. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
  242. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
  243. brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
  244. brainscore_vision/models/densenet_201/test.py +8 -0
  245. brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
  246. brainscore_vision/models/efficientnet_b0/model.py +45 -0
  247. brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
  248. brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
  249. brainscore_vision/models/efficientnet_b0/test.py +8 -0
  250. brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
  251. brainscore_vision/models/efficientnet_b7/model.py +61 -0
  252. brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
  253. brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
  254. brainscore_vision/models/efficientnet_b7/test.py +9 -0
  255. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
  256. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
  257. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
  258. brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
  259. brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
  260. brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
  261. brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
  262. brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
  263. brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
  264. brainscore_vision/models/evresnet_50_1/model.py +62 -0
  265. brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
  266. brainscore_vision/models/evresnet_50_1/test.py +8 -0
  267. brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
  268. brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
  269. brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
  270. brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
  271. brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
  272. brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
  273. brainscore_vision/models/evresnet_50_4/model.py +67 -0
  274. brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
  275. brainscore_vision/models/evresnet_50_4/test.py +8 -0
  276. brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
  277. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
  278. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
  279. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
  280. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
  281. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
  282. brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
  283. brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
  284. brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
  285. brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
  286. brainscore_vision/models/grcnn/__init__.py +7 -0
  287. brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
  288. brainscore_vision/models/grcnn/model.py +54 -0
  289. brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
  290. brainscore_vision/models/grcnn/requirements.txt +2 -0
  291. brainscore_vision/models/grcnn/test.py +9 -0
  292. brainscore_vision/models/grcnn_109/__init__.py +5 -0
  293. brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
  294. brainscore_vision/models/grcnn_109/model.py +53 -0
  295. brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
  296. brainscore_vision/models/grcnn_109/requirements.txt +2 -0
  297. brainscore_vision/models/grcnn_109/test.py +9 -0
  298. brainscore_vision/models/hmax/model.py +2 -2
  299. brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
  300. brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
  301. brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
  302. brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
  303. brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
  304. brainscore_vision/models/inception_v1/__init__.py +7 -0
  305. brainscore_vision/models/inception_v1/model.py +67 -0
  306. brainscore_vision/models/inception_v1/requirements.txt +1 -0
  307. brainscore_vision/models/inception_v1/test.py +8 -0
  308. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
  309. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
  310. brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
  311. brainscore_vision/models/inception_v3/test.py +8 -0
  312. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
  313. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
  314. brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
  315. brainscore_vision/models/inception_v4/test.py +8 -0
  316. brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
  317. brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
  318. brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
  319. brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
  320. brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
  321. brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
  322. brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
  323. brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
  324. brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
  325. brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
  326. brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
  327. brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
  328. brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
  329. brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
  330. brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
  331. brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
  332. brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
  333. brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
  334. brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
  335. brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
  336. brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
  337. brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
  338. brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
  339. brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
  340. brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
  341. brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
  342. brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
  343. brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
  344. brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
  345. brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
  346. brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
  347. brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
  348. brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
  349. brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
  350. brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
  351. brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
  352. brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
  353. brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
  354. brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
  355. brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
  356. brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
  357. brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
  358. brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
  359. brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
  360. brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
  361. brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
  362. brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
  363. brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
  364. brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
  365. brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
  366. brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
  367. brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
  368. brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
  369. brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
  370. brainscore_vision/models/nasnet_large/__init__.py +7 -0
  371. brainscore_vision/models/nasnet_large/model.py +60 -0
  372. brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
  373. brainscore_vision/models/nasnet_large/test.py +8 -0
  374. brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
  375. brainscore_vision/models/nasnet_mobile/model.py +685 -0
  376. brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
  377. brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
  378. brainscore_vision/models/nasnet_mobile/test.py +8 -0
  379. brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
  380. brainscore_vision/models/omnivore_swinB/model.py +79 -0
  381. brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
  382. brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
  383. brainscore_vision/models/omnivore_swinB/test.py +9 -0
  384. brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
  385. brainscore_vision/models/omnivore_swinS/model.py +79 -0
  386. brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
  387. brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
  388. brainscore_vision/models/omnivore_swinS/test.py +9 -0
  389. brainscore_vision/models/pnasnet_large/__init__.py +7 -0
  390. brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
  391. brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
  392. brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
  393. brainscore_vision/models/pnasnet_large/test.py +8 -0
  394. brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
  395. brainscore_vision/models/resnet50_SIN/model.py +63 -0
  396. brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
  397. brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
  398. brainscore_vision/models/resnet50_SIN/test.py +9 -0
  399. brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
  400. brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
  401. brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
  402. brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
  403. brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
  404. brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
  405. brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
  406. brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
  407. brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
  408. brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
  409. brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
  410. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
  411. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
  412. brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
  413. brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
  414. brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
  415. brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
  416. brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
  417. brainscore_vision/models/resnet50_barlow/model.py +53 -0
  418. brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
  419. brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
  420. brainscore_vision/models/resnet50_barlow/test.py +9 -0
  421. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
  422. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
  423. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
  424. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
  425. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
  426. brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
  427. brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
  428. brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
  429. brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
  430. brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
  431. brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
  432. brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
  433. brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
  434. brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
  435. brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
  436. brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
  437. brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
  438. brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
  439. brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
  440. brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
  441. brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
  442. brainscore_vision/models/resnet50_sup/__init__.py +5 -0
  443. brainscore_vision/models/resnet50_sup/model.py +55 -0
  444. brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
  445. brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
  446. brainscore_vision/models/resnet50_sup/test.py +8 -0
  447. brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
  448. brainscore_vision/models/resnet50_vicreg/model.py +62 -0
  449. brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
  450. brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
  451. brainscore_vision/models/resnet50_vicreg/test.py +9 -0
  452. brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
  453. brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
  454. brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
  455. brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
  456. brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
  457. brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
  458. brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
  459. brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
  460. brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
  461. brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
  462. brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
  463. brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
  464. brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
  465. brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
  466. brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
  467. brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
  468. brainscore_vision/models/resnet_101_v1/model.py +42 -0
  469. brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
  470. brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
  471. brainscore_vision/models/resnet_101_v1/test.py +8 -0
  472. brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
  473. brainscore_vision/models/resnet_101_v2/model.py +33 -0
  474. brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
  475. brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
  476. brainscore_vision/models/resnet_101_v2/test.py +8 -0
  477. brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
  478. brainscore_vision/models/resnet_152_v1/model.py +42 -0
  479. brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
  480. brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
  481. brainscore_vision/models/resnet_152_v1/test.py +8 -0
  482. brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
  483. brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
  484. brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
  485. brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
  486. brainscore_vision/models/resnet_152_v2/test.py +8 -0
  487. brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
  488. brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
  489. brainscore_vision/models/resnet_18_test_m/model.py +80 -0
  490. brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
  491. brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
  492. brainscore_vision/models/resnet_18_test_m/test.py +8 -0
  493. brainscore_vision/models/resnet_50_2/__init__.py +9 -0
  494. brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
  495. brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
  496. brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
  497. brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
  498. brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
  499. brainscore_vision/models/resnet_50_2/model.py +46 -0
  500. brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
  501. brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
  502. brainscore_vision/models/resnet_50_2/test.py +8 -0
  503. brainscore_vision/models/resnet_50_robust/model.py +2 -2
  504. brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
  505. brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
  506. brainscore_vision/models/resnet_50_v1/model.py +42 -0
  507. brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
  508. brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
  509. brainscore_vision/models/resnet_50_v1/test.py +8 -0
  510. brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
  511. brainscore_vision/models/resnet_50_v2/model.py +33 -0
  512. brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
  513. brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
  514. brainscore_vision/models/resnet_50_v2/test.py +8 -0
  515. brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
  516. brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
  517. brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
  518. brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
  519. brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
  520. brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
  521. brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
  522. brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
  523. brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
  524. brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
  525. brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
  526. brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
  527. brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
  528. brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
  529. brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
  530. brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
  531. brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
  532. brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
  533. brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
  534. brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
  535. brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
  536. brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
  537. brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
  538. brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
  539. brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
  540. brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
  541. brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
  542. brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
  543. brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
  544. brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
  545. brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
  546. brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
  547. brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
  548. brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
  549. brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
  550. brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
  551. brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
  552. brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
  553. brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
  554. brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
  555. brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
  556. brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
  557. brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
  558. brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
  559. brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
  560. brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
  561. brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
  562. brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
  563. brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
  564. brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
  565. brainscore_vision/models/timm_models/__init__.py +193 -0
  566. brainscore_vision/models/timm_models/model.py +90 -0
  567. brainscore_vision/models/timm_models/model_configs.json +464 -0
  568. brainscore_vision/models/timm_models/requirements.txt +3 -0
  569. brainscore_vision/models/timm_models/test.py +0 -0
  570. brainscore_vision/models/vgg_16/__init__.py +7 -0
  571. brainscore_vision/models/vgg_16/model.py +52 -0
  572. brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
  573. brainscore_vision/models/vgg_16/requirements.txt +1 -0
  574. brainscore_vision/models/vgg_16/test.py +8 -0
  575. brainscore_vision/models/vgg_19/__init__.py +7 -0
  576. brainscore_vision/models/vgg_19/model.py +52 -0
  577. brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
  578. brainscore_vision/models/vgg_19/requirements.txt +1 -0
  579. brainscore_vision/models/vgg_19/test.py +8 -0
  580. brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
  581. brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
  582. brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
  583. brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
  584. brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
  585. brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
  586. brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
  587. brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
  588. brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
  589. brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
  590. brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
  591. brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
  592. brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
  593. brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
  594. brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
  595. brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
  596. brainscore_vision/models/voneresnet_50/__init__.py +7 -0
  597. brainscore_vision/models/voneresnet_50/model.py +37 -0
  598. brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
  599. brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
  600. brainscore_vision/models/voneresnet_50/test.py +8 -0
  601. brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
  602. brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
  603. brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
  604. brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
  605. brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
  606. brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
  607. brainscore_vision/models/voneresnet_50_1/model.py +68 -0
  608. brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
  609. brainscore_vision/models/voneresnet_50_1/test.py +7 -0
  610. brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
  611. brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
  612. brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
  613. brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
  614. brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
  615. brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
  616. brainscore_vision/models/voneresnet_50_3/model.py +66 -0
  617. brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
  618. brainscore_vision/models/voneresnet_50_3/test.py +7 -0
  619. brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
  620. brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
  621. brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
  622. brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
  623. brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
  624. brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
  625. brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
  626. brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
  627. brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
  628. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
  629. brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
  630. brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
  631. brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
  632. brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
  633. brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
  634. brainscore_vision/models/xception/__init__.py +7 -0
  635. brainscore_vision/models/xception/model.py +64 -0
  636. brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
  637. brainscore_vision/models/xception/requirements.txt +2 -0
  638. brainscore_vision/models/xception/test.py +8 -0
  639. brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
  640. brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
  641. brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
  642. brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
  643. brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
  644. brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
  645. brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
  646. brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
  647. brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
  648. brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
  649. brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
  650. brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
  651. brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
  652. brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
  653. brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
  654. brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
  655. brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
  656. brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
  657. brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
  658. brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
  659. brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
  660. brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
  661. brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
  662. brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
  663. brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
  664. brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
  665. brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
  666. brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
  667. brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
  668. brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
  669. brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
  670. brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
  671. brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
  672. brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
  673. brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
  674. brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
  675. brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
  676. brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
  677. brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
  678. brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
  679. brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
  680. brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
  681. brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
  682. brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
  683. brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
  684. brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
  685. brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
  686. brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
  687. brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
  688. brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
  689. brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
  690. brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
  691. brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
  692. brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
  693. brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
  694. brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
  695. brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
  696. brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
  697. brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
  698. brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
  699. brainscore_vision/submission/actions_helpers.py +2 -3
  700. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/METADATA +6 -6
  701. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/RECORD +714 -130
  702. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/WHEEL +1 -1
  703. docs/source/index.rst +1 -0
  704. docs/source/modules/submission.rst +1 -1
  705. docs/source/modules/version_bumping.rst +43 -0
  706. tests/test_submission/test_actions_helpers.py +2 -6
  707. brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
  708. brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
  709. brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
  710. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
  711. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
  712. brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
  713. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
  714. brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
  715. /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
  716. /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
  717. /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
  718. /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
  719. /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
  720. /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
  721. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/LICENSE +0 -0
  722. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.6.dist-info}/top_level.txt +0 -0
brainscore_vision/models/resnet_18_test_m/helpers/resnet.py
@@ -0,0 +1,586 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ # adapted from
4
+ # https://pytorch.org/vision/stable/_modules/torchvision/models/resnet.html
5
+
6
+ def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
7
+ """3x3 convolution with padding"""
8
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
9
+ padding=dilation, groups=groups, bias=False, dilation=dilation)
10
+
11
+
12
+ def conv1x1(in_planes, out_planes, stride=1):
13
+ """1x1 convolution"""
14
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
15
+
16
+
17
+ class BasicBlock(nn.Module):
18
+ expansion = 1
19
+
20
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
21
+ base_width=64, dilation=1, norm_layer=None):
22
+ super(BasicBlock, self).__init__()
23
+ if norm_layer is None:
24
+ norm_layer = nn.BatchNorm2d
25
+ if groups != 1:
26
+ raise ValueError('BasicBlock only supports groups=1')
27
+ if dilation > 1:
28
+ raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
29
+ # Both self.conv1 and self.downsample layers downsample the input when stride != 1
30
+ self.conv1 = conv3x3(inplanes, planes, stride)
31
+ self.bn1 = norm_layer(planes)
32
+ self.relu1 = nn.ReLU(inplace=True)
33
+ self.relu2 = nn.ReLU(inplace=True)
34
+ self.conv2 = conv3x3(planes, planes)
35
+ self.bn2 = norm_layer(planes)
36
+ self.downsample = downsample
37
+ self.stride = stride
38
+
39
+ def forward(self, x):
40
+ identity = x
41
+
42
+ out = self.conv1(x)
43
+ out = self.bn1(out)
44
+ out = self.relu1(out)
45
+
46
+ out = self.conv2(out)
47
+ out = self.bn2(out)
48
+
49
+ if self.downsample is not None:
50
+ identity = self.downsample(x)
51
+
52
+ out += identity
53
+ out = self.relu2(out)
54
+
55
+ return out
56
+
57
+
58
+ class Bottleneck(nn.Module):
+     # Bottleneck in torchvision places the stride for downsampling at the 3x3 convolution (self.conv2),
+     # while the original implementation places the stride at the first 1x1 convolution (self.conv1),
+     # according to "Deep residual learning for image recognition" (https://arxiv.org/abs/1512.03385).
+     # This variant is also known as ResNet V1.5 and improves accuracy according to
+     # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+
+     expansion = 4
+
+     def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
+                  base_width=64, dilation=1, norm_layer=None):
+         super(Bottleneck, self).__init__()
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         width = int(planes * (base_width / 64.)) * groups
+         # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+         self.conv1 = conv1x1(inplanes, width)
+         self.bn1 = norm_layer(width)
+         self.conv2 = conv3x3(width, width, stride, groups, dilation)
+         self.bn2 = norm_layer(width)
+         self.conv3 = conv1x1(width, planes * self.expansion)
+         self.bn3 = norm_layer(planes * self.expansion)
+         self.relu = nn.ReLU(inplace=True)
+         self.downsample = downsample
+         self.stride = stride
+
+     def forward(self, x):
+         identity = x
+
+         out = self.conv1(x)
+         out = self.bn1(out)
+         out = self.relu(out)
+
+         out = self.conv2(out)
+         out = self.bn2(out)
+         out = self.relu(out)
+
+         out = self.conv3(out)
+         out = self.bn3(out)
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.relu(out)
+
+         return out
+
+
+ class ResNet(nn.Module):
+     def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
+                  groups=1, width_per_group=64, replace_stride_with_dilation=None,
+                  norm_layer=None, brain_score=False):
+         super(ResNet, self).__init__()
+         self.brain_score = brain_score
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         self._norm_layer = norm_layer
+
+         self.inplanes = width_per_group
+         self.dilation = 1
+         if replace_stride_with_dilation is None:
+             # each element in the tuple indicates if we should replace
+             # the 2x2 stride with a dilated convolution instead
+             replace_stride_with_dilation = [False, False, False]
+         if len(replace_stride_with_dilation) != 3:
+             raise ValueError("replace_stride_with_dilation should be None "
+                              "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+         self.groups = groups
+         self.base_width = width_per_group
+         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
+                                bias=False)
+         self.bn1 = norm_layer(self.inplanes)
+         self.relu = nn.ReLU(inplace=True)
+         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+         self.layer1 = self._make_layer(block, self.base_width, layers[0])
+         self.layer2 = self._make_layer(block, self.base_width * 2, layers[1], stride=2,
+                                        dilate=replace_stride_with_dilation[0])
+         self.layer3 = self._make_layer(block, self.base_width * 4, layers[2], stride=2,
+                                        dilate=replace_stride_with_dilation[1])
+         self.layer4 = self._make_layer(block, self.base_width * 8, layers[3], stride=2,
+                                        dilate=replace_stride_with_dilation[2])
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(self.base_width * 8 * block.expansion, num_classes)
+         self.logits = nn.Identity()  # named no-op so the final features can be read out as 'logits'
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                 nn.init.constant_(m.weight, 1)
+                 nn.init.constant_(m.bias, 0)
+
+         # Zero-initialize the last BN in each residual branch,
+         # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+         # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+         if zero_init_residual:
+             for m in self.modules():
+                 if isinstance(m, Bottleneck):
+                     nn.init.constant_(m.bn3.weight, 0)
+                 elif isinstance(m, BasicBlock):
+                     nn.init.constant_(m.bn2.weight, 0)
+
+     def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+         norm_layer = self._norm_layer
+         downsample = None
+         previous_dilation = self.dilation
+         if dilate:
+             self.dilation *= stride
+             stride = 1
+         if stride != 1 or self.inplanes != planes * block.expansion:
+             downsample = nn.Sequential(
+                 conv1x1(self.inplanes, planes * block.expansion, stride),
+                 norm_layer(planes * block.expansion),
+             )
+
+         layers = []
+         layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                             self.base_width, previous_dilation, norm_layer))
+         self.inplanes = planes * block.expansion
+         for _ in range(1, blocks):
+             layers.append(block(self.inplanes, planes, groups=self.groups,
+                                 base_width=self.base_width, dilation=self.dilation,
+                                 norm_layer=norm_layer))
+
+         return nn.Sequential(*layers)
+
+     # def _forward_impl(self, x):
+     #     # See note [TorchScript super()]
+     #     x = self.conv1(x)
+     #     x = self.bn1(x)
+     #     x = self.relu(x)
+     #     x = self.maxpool(x)
+     #
+     #     x = self.layer1(x)
+     #     x = self.layer2(x)
+     #     x = self.layer3(x)
+     #     x = self.layer4(x)
+     #
+     #     x = self.avgpool(x)
+     #     x = torch.flatten(x, 1)
+     #     x = self.fc(x)
+     #     x = self.logits(x)
+     #
+     #     return x
+
+     def _forward_impl_brain_score(self, x):
+         # Same, but no fc layer
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+
+         x = self.avgpool(x)
+         x = torch.flatten(x, 1)
+         x = self.logits(x)
+
+         return x
+
+     def forward(self, x):
+         return self._forward_impl_brain_score(x)
+         # if self.brain_score:
+         #     return self._forward_impl_brain_score(x)
+         # return self._forward_impl(x)
+
+
+ class ResNetCIFAR(nn.Module):
+     def __init__(self, block, layers, num_classes=10, zero_init_residual=False,
+                  groups=1, width_per_group=16, replace_stride_with_dilation=None,
+                  norm_layer=None):
+         super().__init__()
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         self._norm_layer = norm_layer
+
+         self.inplanes = width_per_group
+         self.dilation = 1
+         if replace_stride_with_dilation is None:
+             # each element in the tuple indicates if we should replace
+             # the 2x2 stride with a dilated convolution instead
+             replace_stride_with_dilation = [False, False, False]
+         if len(replace_stride_with_dilation) != 3:
+             raise ValueError("replace_stride_with_dilation should be None "
+                              "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+         self.groups = groups
+         self.base_width = width_per_group
+         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
+         self.bn1 = norm_layer(self.inplanes)
+         self.relu = nn.ReLU(inplace=True)
+         # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+         self.layer1 = self._make_layer(block, self.base_width, layers[0])
+         self.layer2 = self._make_layer(block, self.base_width * 2, layers[1], stride=2,
+                                        dilate=replace_stride_with_dilation[0])
+         self.layer3 = self._make_layer(block, self.base_width * 4, layers[2], stride=2,
+                                        dilate=replace_stride_with_dilation[1])
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(self.base_width * 4 * block.expansion, num_classes)
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                 nn.init.constant_(m.weight, 1)
+                 nn.init.constant_(m.bias, 0)
+
+         # Zero-initialize the last BN in each residual branch,
+         # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+         # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+         if zero_init_residual:
+             for m in self.modules():
+                 if isinstance(m, Bottleneck):
+                     nn.init.constant_(m.bn3.weight, 0)
+                 elif isinstance(m, BasicBlock):
+                     nn.init.constant_(m.bn2.weight, 0)
+
+     def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+         norm_layer = self._norm_layer
+         downsample = None
+         previous_dilation = self.dilation
+         if dilate:
+             self.dilation *= stride
+             stride = 1
+         if stride != 1 or self.inplanes != planes * block.expansion:
+             downsample = nn.Sequential(
+                 conv1x1(self.inplanes, planes * block.expansion, stride),
+                 norm_layer(planes * block.expansion),
+             )
+
+         layers = []
+         layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                             self.base_width, previous_dilation, norm_layer))
+         self.inplanes = planes * block.expansion
+         for _ in range(1, blocks):
+             layers.append(block(self.inplanes, planes, groups=self.groups,
+                                 base_width=self.base_width, dilation=self.dilation,
+                                 norm_layer=norm_layer))
+
+         return nn.Sequential(*layers)
+
+     def _forward_impl(self, x):
+         # See note [TorchScript super()]
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         # x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         # x = self.layer4(x)
+
+         x = self.avgpool(x)
+         x = torch.flatten(x, 1)
+         x = self.fc(x)
+
+         return x
+
+     def forward(self, x):
+         return self._forward_impl(x)
+
+
+ def resnet18(**kwargs):
+     r"""ResNet-18 model from
+     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+     """
+     return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
+
+
+ def resnet_6n2_cifar(n=3, **kwargs):
+     r"""ResNet-(6n+2) model for CIFAR-10 (ResNet-20 at the default n=3) from
+     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+     """
+     return ResNetCIFAR(BasicBlock, [n, n, n], **kwargs)
+
+
+ def resnet10(**kwargs):
+     r"""ResNet-10 model (four stages with one BasicBlock each), adapted from
+     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+     """
+     return ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
+
+
+ class ResNetTwoBlock(nn.Module):
+     def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
+                  groups=1, width_per_group=64, replace_stride_with_dilation=None,
+                  norm_layer=None, brain_score=False):
+         super().__init__()
+         self.brain_score = brain_score
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         self._norm_layer = norm_layer
+
+         self.inplanes = width_per_group
+         self.dilation = 1
+         if replace_stride_with_dilation is None:
+             # each element in the tuple indicates if we should replace
+             # the 2x2 stride with a dilated convolution instead
+             replace_stride_with_dilation = [False, False, False]
+         if len(replace_stride_with_dilation) != 3:
+             raise ValueError("replace_stride_with_dilation should be None "
+                              "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+         self.groups = groups
+         self.base_width = width_per_group
+         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
+                                bias=False)
+         self.bn1 = norm_layer(self.inplanes)
+         self.relu = nn.ReLU(inplace=True)
+         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+         self.layer1 = self._make_layer(block, self.base_width, layers[0])
+         self.layer2 = self._make_layer(block, self.base_width * 2, layers[1], stride=2,
+                                        dilate=replace_stride_with_dilation[0])
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(self.base_width * 2 * block.expansion, num_classes)
+         self.logits = nn.Identity()
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                 nn.init.constant_(m.weight, 1)
+                 nn.init.constant_(m.bias, 0)
+
+         # Zero-initialize the last BN in each residual branch,
+         # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+         # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+         if zero_init_residual:
+             for m in self.modules():
+                 if isinstance(m, Bottleneck):
+                     nn.init.constant_(m.bn3.weight, 0)
+                 elif isinstance(m, BasicBlock):
+                     nn.init.constant_(m.bn2.weight, 0)
+
+     def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+         norm_layer = self._norm_layer
+         downsample = None
+         previous_dilation = self.dilation
+         if dilate:
+             self.dilation *= stride
+             stride = 1
+         if stride != 1 or self.inplanes != planes * block.expansion:
+             downsample = nn.Sequential(
+                 conv1x1(self.inplanes, planes * block.expansion, stride),
+                 norm_layer(planes * block.expansion),
+             )
+
+         layers = []
+         layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                             self.base_width, previous_dilation, norm_layer))
+         self.inplanes = planes * block.expansion
+         for _ in range(1, blocks):
+             layers.append(block(self.inplanes, planes, groups=self.groups,
+                                 base_width=self.base_width, dilation=self.dilation,
+                                 norm_layer=norm_layer))
+
+         return nn.Sequential(*layers)
+
+     def _forward_impl(self, x):
+         # See note [TorchScript super()]
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+
+         x = self.avgpool(x)
+         x = torch.flatten(x, 1)
+         x = self.fc(x)
+         x = self.logits(x)
+
+         return x
+
+     def _forward_impl_brain_score(self, x):
+         # Same, but no fc layer
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+
+         x = self.avgpool(x)
+         x = torch.flatten(x, 1)
+         x = self.logits(x)
+
+         return x
+
+     def forward(self, x):
+         if self.brain_score:
+             return self._forward_impl_brain_score(x)
+         return self._forward_impl(x)
+
+
+ class ResNetTwoBlockM(nn.Module):
+     def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
+                  groups=1, width_per_group=64, replace_stride_with_dilation=None,
+                  norm_layer=None, brain_score=False):
+         super().__init__()
+         self.brain_score = brain_score
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         self._norm_layer = norm_layer
+
+         self.inplanes = width_per_group
+         self.dilation = 1
+         if replace_stride_with_dilation is None:
+             # each element in the tuple indicates if we should replace
+             # the 2x2 stride with a dilated convolution instead
+             replace_stride_with_dilation = [False, False, False]
+         if len(replace_stride_with_dilation) != 3:
+             raise ValueError("replace_stride_with_dilation should be None "
+                              "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+         self.groups = groups
+         self.base_width = width_per_group
+         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
+                                bias=False)
+         self.bn1 = norm_layer(self.inplanes)
+         self.relu = nn.ReLU(inplace=True)
+         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+         self.layer1 = self._make_layer(block, self.base_width, layers[0])
+         self.layer2 = self._make_layer(block, self.base_width * 2, layers[1], stride=2,
+                                        dilate=replace_stride_with_dilation[0])
+         self.avgpool = nn.AdaptiveAvgPool2d((2, 2))
+         # 2x2 pooling yields 4 spatial positions x (base_width * 2) channels = base_width * 8 fc inputs
+         self.fc = nn.Linear(self.base_width * 8 * block.expansion, num_classes)
+         self.logits = nn.Identity()
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                 nn.init.constant_(m.weight, 1)
+                 nn.init.constant_(m.bias, 0)
+
+         # Zero-initialize the last BN in each residual branch,
+         # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+         # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+         if zero_init_residual:
+             for m in self.modules():
+                 if isinstance(m, Bottleneck):
+                     nn.init.constant_(m.bn3.weight, 0)
+                 elif isinstance(m, BasicBlock):
+                     nn.init.constant_(m.bn2.weight, 0)
+
+     def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+         norm_layer = self._norm_layer
+         downsample = None
+         previous_dilation = self.dilation
+         if dilate:
+             self.dilation *= stride
+             stride = 1
+         if stride != 1 or self.inplanes != planes * block.expansion:
+             downsample = nn.Sequential(
+                 conv1x1(self.inplanes, planes * block.expansion, stride),
+                 norm_layer(planes * block.expansion),
+             )
+
+         layers = []
+         layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                             self.base_width, previous_dilation, norm_layer))
+         self.inplanes = planes * block.expansion
+         for _ in range(1, blocks):
+             layers.append(block(self.inplanes, planes, groups=self.groups,
+                                 base_width=self.base_width, dilation=self.dilation,
+                                 norm_layer=norm_layer))
+
+         return nn.Sequential(*layers)
+
+     def _forward_impl(self, x):
+         # See note [TorchScript super()]
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+
+         x = self.avgpool(x)
+         x = torch.flatten(x, 1)
+         x = self.fc(x)
+         x = self.logits(x)
+
+         return x
+
+     def _forward_impl_brain_score(self, x):
+         # Same, but no fc layer
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+
+         x = self.avgpool(x)
+         x = torch.flatten(x, 1)
+         x = self.logits(x)
+
+         return x
+
+     def forward(self, x):
+         if self.brain_score:
+             return self._forward_impl_brain_score(x)
+         return self._forward_impl(x)
+
+
+ def resnet10_two_block(**kwargs):
+     r"""Two-stage, 10-layer ResNet variant (two BasicBlocks per stage), adapted from
+     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+     """
+     return ResNetTwoBlock(BasicBlock, [2, 2], **kwargs)
+
+
+ def resnet10_two_block_m(**kwargs):
+     r"""Same two-stage variant as ``resnet10_two_block``, but with 2x2 average pooling
+     before the classifier (``ResNetTwoBlockM``).
+     """
+     return ResNetTwoBlockM(BasicBlock, [2, 2], **kwargs)
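
The file above only defines architectures. As a quick sanity check, here is a minimal sketch (not part of the diff) that exercises two of its entry points and confirms the Brain-Score forward path returns pooled features rather than class logits. The import path is an assumption (the plugin file below imports this module as `from .helpers import resnet`), and the input sizes are illustrative:

    import torch
    from helpers import resnet  # hypothetical import path

    model = resnet.resnet18(brain_score=True)
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # torch.Size([1, 512]): avgpool output; fc is bypassed

    two_block = resnet.resnet10_two_block(brain_score=True)
    two_block.eval()
    with torch.no_grad():
        feats = two_block(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # torch.Size([1, 128]): base_width * 2 channels after layer2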
@@ -0,0 +1,80 @@
+ import functools
+ import torch
+ from .helpers import resnet
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from brainscore_vision.model_helpers.s3 import load_weight_file
+ from brainscore_vision.model_helpers.check_submission import check_models
+
+
+ def get_model(name):
+     """
+     This method fetches an instance of a base model. The instance has to be callable and return an xarray object
+     containing activations. There are standard wrapper implementations for common libraries, like PyTorch and
+     Keras. Check out the examples folder to see more. For custom implementations, check out the implementation
+     of the wrappers.
+     :param name: the name of the model to fetch
+     :return: the model instance
+     """
+     assert name == "resnet-18_test_m"
+     if name[-2:] == '_m':
+         name = name[:-2]
+
+     model = resnet.resnet18(width_per_group=64, brain_score=True)
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                     relative_path="resnet-18_test_m/resnet18-f37072fd.pth",
+                                     version_id="null",
+                                     sha1="93e13d94f74fdf476689608f146f47bde96b30b0")
+     checkpoint = torch.load(weights_path)
+     model.load_state_dict(checkpoint)
+     model.eval()
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     """
+     This method returns a list of string layer names to consider per model. The benchmarks map brain regions to
+     layers and use this list as the set of candidate layers. The list doesn't have to contain all layers; the
+     fewer layers, the faster the benchmarking process runs. Additionally, each of the given layers has to produce
+     an activations vector of at least size 25. The layer names are passed back to the model instance and have to
+     be resolved there. For a PyTorch model, layer names are dot-concatenated per module, e.g. "features.2".
+     :param name: the name of the model to return the layers for
+     :return: a list of strings containing all layers that should be considered as brain areas
+     """
+     assert name == "resnet-18_test_m"
+     layers = ['relu']
+     n_blocks = 2 if 'two-block' in name else 4
+     for layer in range(1, n_blocks + 1):
+         for block in range(2):
+             layers.append('layer%d.%d.relu1' % (layer, block))
+             layers.append('layer%d.%d.relu2' % (layer, block))
+     layers.append('logits')
+     return layers
+
+
+ def get_bibtex(model_identifier):
+     """
+     A method returning the bibtex reference of the requested model as a string.
+     """
+     return """@inproceedings{KubiliusSchrimpf2019CORnet,
+     abstract = {Deep convolutional artificial neural networks (ANNs) are the leading class of candidate models of the mechanisms of visual processing in the primate ventral stream. While initially inspired by brain anatomy, over the past years, these ANNs have evolved from a simple eight-layer architecture in AlexNet to extremely deep and branching architectures, demonstrating increasingly better object categorization performance, yet bringing into question how brain-like they still are. In particular, typical deep models from the machine learning community are often hard to map onto the brain's anatomy due to their vast number of layers and missing biologically-important connections, such as recurrence. Here we demonstrate that better anatomical alignment to the brain and high performance on machine learning as well as neuroscience measures do not have to be in contradiction. We developed CORnet-S, a shallow ANN with four anatomically mapped areas and recurrent connectivity, guided by Brain-Score, a new large-scale composite of neural and behavioral benchmarks for quantifying the functional fidelity of models of the primate ventral visual stream. Despite being significantly shallower than most models, CORnet-S is the top model on Brain-Score and outperforms similarly compact models on ImageNet. Moreover, our extensive analyses of CORnet-S circuitry variants reveal that recurrence is the main predictive factor of both Brain-Score and ImageNet top-1 performance. Finally, we report that the temporal evolution of the CORnet-S "IT" neural population resembles the actual monkey IT population dynamics. Taken together, these results establish CORnet-S, a compact, recurrent ANN, as the current best model of the primate ventral visual stream.},
+     archivePrefix = {arXiv},
+     arxivId = {1909.06161},
+     author = {Kubilius, Jonas and Schrimpf, Martin and Hong, Ha and Majaj, Najib J. and Rajalingham, Rishi and Issa, Elias B. and Kar, Kohitij and Bashivan, Pouya and Prescott-Roy, Jonathan and Schmidt, Kailyn and Nayebi, Aran and Bear, Daniel and Yamins, Daniel L. K. and DiCarlo, James J.},
+     booktitle = {Neural Information Processing Systems (NeurIPS)},
+     editor = {Wallach, H. and Larochelle, H. and Beygelzimer, A. and D'Alch{\'{e}}-Buc, F. and Fox, E. and Garnett, R.},
+     pages = {12785--12796},
+     publisher = {Curran Associates, Inc.},
+     title = {{Brain-Like Object Recognition with High-Performing Shallow Recurrent ANNs}},
+     url = {http://papers.nips.cc/paper/9441-brain-like-object-recognition-with-high-performing-shallow-recurrent-anns},
+     year = {2019}
+     }"""
+
+
+ if __name__ == '__main__':
+     # Use this method to ensure the correctness of the BaseModel implementations.
+     # It executes a mock run of brain-score benchmarks.
+     check_models.check_base_models(__name__)
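
For orientation, a minimal sketch of how the plugin functions above fit together (run from within the plugin module; loading weights requires access to the S3 bucket). Note that `get_model` strips the trailing `_m`, so the wrapper identifier is 'resnet-18_test' even though the registered model name is 'resnet-18_test_m':

    layers = get_layers('resnet-18_test_m')
    print(len(layers))  # 18: the stem 'relu' + 4 stages x 2 blocks x 2 ReLUs + 'logits'
    print(layers[1])    # 'layer1.0.relu1', resolved against the ResNet module tree

    wrapper = get_model('resnet-18_test_m')  # downloads and loads the checkpoint
    print(wrapper.identifier, wrapper.image_size)  # resnet-18_test 224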
@@ -0,0 +1 @@
+ {"IT": "layer4.0.relu2", "V4": "layer3.0.relu1", "V1": "layer1.0.relu1", "V2": "layer3.0.relu2"}
@@ -0,0 +1,2 @@
+ torch
+ torchvision
@@ -0,0 +1,8 @@
+ import brainscore_vision
+ import pytest
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('resnet-18_test_m')
+     assert model.identifier == 'resnet-18_test_m'
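
To run just this smoke test locally, one option is pytest's Python entry point; the file path here is hypothetical, and the `travis_slow` marker is assumed to be registered in the package's pytest configuration:

    import pytest

    # select only tests carrying the travis_slow marker in this test file
    pytest.main(['-m', 'travis_slow', 'test.py'])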