brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl

Files changed (722)
  1. brainscore_vision/data/baker2022/__init__.py +10 -10
  2. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
  3. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
  4. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
  5. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
  6. brainscore_vision/data/barbumayo2019/__init__.py +3 -3
  7. brainscore_vision/data/bashivankar2019/__init__.py +10 -10
  8. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
  9. brainscore_vision/data/bmd2024/__init__.py +20 -20
  10. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
  11. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
  12. brainscore_vision/data/bracci2019/__init__.py +5 -5
  13. brainscore_vision/data/bracci2019/data_packaging.py +1 -1
  14. brainscore_vision/data/cadena2017/__init__.py +5 -5
  15. brainscore_vision/data/cichy2019/__init__.py +5 -5
  16. brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
  17. brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
  18. brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
  19. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
  20. brainscore_vision/data/david2004/__init__.py +5 -5
  21. brainscore_vision/data/deng2009/__init__.py +3 -3
  22. brainscore_vision/data/ferguson2024/__init__.py +112 -112
  23. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
  24. brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
  25. brainscore_vision/data/geirhos2021/__init__.py +85 -85
  26. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
  27. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
  28. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
  29. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
  30. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
  31. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
  32. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
  33. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
  34. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
  35. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
  36. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
  37. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
  38. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
  39. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
  40. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
  41. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
  42. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
  43. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
  44. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
  45. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
  46. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
  47. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
  48. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
  49. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
  50. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
  51. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
  52. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
  53. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
  54. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
  55. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
  56. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
  57. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
  58. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
  59. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
  60. brainscore_vision/data/hebart2023/__init__.py +5 -5
  61. brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
  62. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
  63. brainscore_vision/data/hendrycks2019/__init__.py +12 -12
  64. brainscore_vision/data/igustibagus2024/__init__.py +5 -5
  65. brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
  66. brainscore_vision/data/islam2021/__init__.py +3 -3
  67. brainscore_vision/data/kar2018/__init__.py +7 -7
  68. brainscore_vision/data/kar2019/__init__.py +5 -5
  69. brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
  70. brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
  71. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
  72. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
  73. brainscore_vision/data/majajhong2015/__init__.py +23 -23
  74. brainscore_vision/data/malania2007/__init__.py +77 -77
  75. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
  76. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
  77. brainscore_vision/data/maniquet2024/__init__.py +11 -11
  78. brainscore_vision/data/marques2020/__init__.py +30 -30
  79. brainscore_vision/data/rajalingham2018/__init__.py +10 -10
  80. brainscore_vision/data/rajalingham2020/__init__.py +5 -5
  81. brainscore_vision/data/rust2012/__init__.py +7 -7
  82. brainscore_vision/data/sanghavi2020/__init__.py +19 -19
  83. brainscore_vision/data/scialom2024/__init__.py +110 -110
  84. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
  85. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
  86. brainscore_vision/data/seibert2019/__init__.py +2 -2
  87. brainscore_vision/data/zhang2018/__init__.py +5 -5
  88. brainscore_vision/data_helpers/s3.py +25 -6
  89. brainscore_vision/model_helpers/activations/pytorch.py +34 -12
  90. brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
  91. brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
  92. brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
  93. brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
  94. brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
  95. brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
  96. brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
  97. brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
  98. brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
  99. brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
  100. brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
  101. brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
  102. brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
  103. brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
  104. brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
  105. brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
  106. brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
  107. brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
  108. brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
  109. brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
  110. brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
  111. brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
  112. brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
  113. brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
  114. brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
  115. brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
  116. brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
  117. brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
  118. brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
  119. brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
  120. brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
  121. brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
  122. brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
  123. brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
  124. brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
  125. brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
  126. brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
  127. brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
  128. brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
  129. brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
  130. brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
  131. brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
  132. brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
  133. brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
  134. brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
  135. brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
  136. brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
  137. brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
  138. brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
  139. brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
  140. brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
  141. brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
  142. brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
  143. brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
  144. brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
  145. brainscore_vision/models/ReAlnet/__init__.py +64 -0
  146. brainscore_vision/models/ReAlnet/model.py +237 -0
  147. brainscore_vision/models/ReAlnet/requirements.txt +7 -0
  148. brainscore_vision/models/ReAlnet/test.py +0 -0
  149. brainscore_vision/models/ReAlnet/weights.json +26 -0
  150. brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
  151. brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
  152. brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
  153. brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
  154. brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
  155. brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
  156. brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
  157. brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
  158. brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
  159. brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
  160. brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
  161. brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
  162. brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
  163. brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
  164. brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
  165. brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
  166. brainscore_vision/models/VOneCORnet_S/model.py +25 -0
  167. brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
  168. brainscore_vision/models/VOneCORnet_S/test.py +8 -0
  169. brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
  170. brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
  171. brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
  172. brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
  173. brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
  174. brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
  175. brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
  176. brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
  177. brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
  178. brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
  179. brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
  180. brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
  181. brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
  182. brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
  183. brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
  184. brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
  185. brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
  186. brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
  187. brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
  188. brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
  189. brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
  190. brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
  191. brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
  192. brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
  193. brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
  194. brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
  195. brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
  196. brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
  197. brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
  198. brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
  199. brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
  200. brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
  201. brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
  202. brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
  203. brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
  204. brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
  205. brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
  206. brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
  207. brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
  208. brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
  209. brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
  210. brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
  211. brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
  212. brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
  213. brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
  214. brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
  215. brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
  216. brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
  217. brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
  218. brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
  219. brainscore_vision/models/antialiased-r50/__init__.py +7 -0
  220. brainscore_vision/models/antialiased-r50/model.py +62 -0
  221. brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
  222. brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
  223. brainscore_vision/models/antialiased-r50/test.py +8 -0
  224. brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
  225. brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
  226. brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
  227. brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
  228. brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
  229. brainscore_vision/models/cornet_s/model.py +2 -2
  230. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
  231. brainscore_vision/models/densenet_121/__init__.py +7 -0
  232. brainscore_vision/models/densenet_121/model.py +63 -0
  233. brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
  234. brainscore_vision/models/densenet_121/requirements.txt +1 -0
  235. brainscore_vision/models/densenet_121/test.py +8 -0
  236. brainscore_vision/models/densenet_169/__init__.py +7 -0
  237. brainscore_vision/models/densenet_169/model.py +63 -0
  238. brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
  239. brainscore_vision/models/densenet_169/requirements.txt +1 -0
  240. brainscore_vision/models/densenet_169/test.py +9 -0
  241. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
  242. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
  243. brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
  244. brainscore_vision/models/densenet_201/test.py +8 -0
  245. brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
  246. brainscore_vision/models/efficientnet_b0/model.py +45 -0
  247. brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
  248. brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
  249. brainscore_vision/models/efficientnet_b0/test.py +8 -0
  250. brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
  251. brainscore_vision/models/efficientnet_b7/model.py +61 -0
  252. brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
  253. brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
  254. brainscore_vision/models/efficientnet_b7/test.py +9 -0
  255. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
  256. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
  257. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
  258. brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
  259. brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
  260. brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
  261. brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
  262. brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
  263. brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
  264. brainscore_vision/models/evresnet_50_1/model.py +62 -0
  265. brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
  266. brainscore_vision/models/evresnet_50_1/test.py +8 -0
  267. brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
  268. brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
  269. brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
  270. brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
  271. brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
  272. brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
  273. brainscore_vision/models/evresnet_50_4/model.py +67 -0
  274. brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
  275. brainscore_vision/models/evresnet_50_4/test.py +8 -0
  276. brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
  277. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
  278. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
  279. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
  280. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
  281. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
  282. brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
  283. brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
  284. brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
  285. brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
  286. brainscore_vision/models/grcnn/__init__.py +7 -0
  287. brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
  288. brainscore_vision/models/grcnn/model.py +54 -0
  289. brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
  290. brainscore_vision/models/grcnn/requirements.txt +2 -0
  291. brainscore_vision/models/grcnn/test.py +9 -0
  292. brainscore_vision/models/grcnn_109/__init__.py +5 -0
  293. brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
  294. brainscore_vision/models/grcnn_109/model.py +53 -0
  295. brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
  296. brainscore_vision/models/grcnn_109/requirements.txt +2 -0
  297. brainscore_vision/models/grcnn_109/test.py +9 -0
  298. brainscore_vision/models/hmax/model.py +2 -2
  299. brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
  300. brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
  301. brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
  302. brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
  303. brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
  304. brainscore_vision/models/inception_v1/__init__.py +7 -0
  305. brainscore_vision/models/inception_v1/model.py +67 -0
  306. brainscore_vision/models/inception_v1/requirements.txt +1 -0
  307. brainscore_vision/models/inception_v1/test.py +8 -0
  308. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
  309. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
  310. brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
  311. brainscore_vision/models/inception_v3/test.py +8 -0
  312. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
  313. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
  314. brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
  315. brainscore_vision/models/inception_v4/test.py +8 -0
  316. brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
  317. brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
  318. brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
  319. brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
  320. brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
  321. brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
  322. brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
  323. brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
  324. brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
  325. brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
  326. brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
  327. brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
  328. brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
  329. brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
  330. brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
  331. brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
  332. brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
  333. brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
  334. brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
  335. brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
  336. brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
  337. brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
  338. brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
  339. brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
  340. brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
  341. brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
  342. brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
  343. brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
  344. brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
  345. brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
  346. brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
  347. brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
  348. brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
  349. brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
  350. brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
  351. brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
  352. brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
  353. brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
  354. brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
  355. brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
  356. brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
  357. brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
  358. brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
  359. brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
  360. brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
  361. brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
  362. brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
  363. brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
  364. brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
  365. brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
  366. brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
  367. brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
  368. brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
  369. brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
  370. brainscore_vision/models/nasnet_large/__init__.py +7 -0
  371. brainscore_vision/models/nasnet_large/model.py +60 -0
  372. brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
  373. brainscore_vision/models/nasnet_large/test.py +8 -0
  374. brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
  375. brainscore_vision/models/nasnet_mobile/model.py +685 -0
  376. brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
  377. brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
  378. brainscore_vision/models/nasnet_mobile/test.py +8 -0
  379. brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
  380. brainscore_vision/models/omnivore_swinB/model.py +79 -0
  381. brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
  382. brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
  383. brainscore_vision/models/omnivore_swinB/test.py +9 -0
  384. brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
  385. brainscore_vision/models/omnivore_swinS/model.py +79 -0
  386. brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
  387. brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
  388. brainscore_vision/models/omnivore_swinS/test.py +9 -0
  389. brainscore_vision/models/pnasnet_large/__init__.py +7 -0
  390. brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
  391. brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
  392. brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
  393. brainscore_vision/models/pnasnet_large/test.py +8 -0
  394. brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
  395. brainscore_vision/models/resnet50_SIN/model.py +63 -0
  396. brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
  397. brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
  398. brainscore_vision/models/resnet50_SIN/test.py +9 -0
  399. brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
  400. brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
  401. brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
  402. brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
  403. brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
  404. brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
  405. brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
  406. brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
  407. brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
  408. brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
  409. brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
  410. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
  411. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
  412. brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
  413. brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
  414. brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
  415. brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
  416. brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
  417. brainscore_vision/models/resnet50_barlow/model.py +53 -0
  418. brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
  419. brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
  420. brainscore_vision/models/resnet50_barlow/test.py +9 -0
  421. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
  422. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
  423. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
  424. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
  425. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
  426. brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
  427. brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
  428. brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
  429. brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
  430. brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
  431. brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
  432. brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
  433. brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
  434. brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
  435. brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
  436. brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
  437. brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
  438. brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
  439. brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
  440. brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
  441. brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
  442. brainscore_vision/models/resnet50_sup/__init__.py +5 -0
  443. brainscore_vision/models/resnet50_sup/model.py +55 -0
  444. brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
  445. brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
  446. brainscore_vision/models/resnet50_sup/test.py +8 -0
  447. brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
  448. brainscore_vision/models/resnet50_vicreg/model.py +62 -0
  449. brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
  450. brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
  451. brainscore_vision/models/resnet50_vicreg/test.py +9 -0
  452. brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
  453. brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
  454. brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
  455. brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
  456. brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
  457. brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
  458. brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
  459. brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
  460. brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
  461. brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
  462. brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
  463. brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
  464. brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
  465. brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
  466. brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
  467. brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
  468. brainscore_vision/models/resnet_101_v1/model.py +42 -0
  469. brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
  470. brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
  471. brainscore_vision/models/resnet_101_v1/test.py +8 -0
  472. brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
  473. brainscore_vision/models/resnet_101_v2/model.py +33 -0
  474. brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
  475. brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
  476. brainscore_vision/models/resnet_101_v2/test.py +8 -0
  477. brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
  478. brainscore_vision/models/resnet_152_v1/model.py +42 -0
  479. brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
  480. brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
  481. brainscore_vision/models/resnet_152_v1/test.py +8 -0
  482. brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
  483. brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
  484. brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
  485. brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
  486. brainscore_vision/models/resnet_152_v2/test.py +8 -0
  487. brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
  488. brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
  489. brainscore_vision/models/resnet_18_test_m/model.py +80 -0
  490. brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
  491. brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
  492. brainscore_vision/models/resnet_18_test_m/test.py +8 -0
  493. brainscore_vision/models/resnet_50_2/__init__.py +9 -0
  494. brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
  495. brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
  496. brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
  497. brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
  498. brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
  499. brainscore_vision/models/resnet_50_2/model.py +46 -0
  500. brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
  501. brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
  502. brainscore_vision/models/resnet_50_2/test.py +8 -0
  503. brainscore_vision/models/resnet_50_robust/model.py +2 -2
  504. brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
  505. brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
  506. brainscore_vision/models/resnet_50_v1/model.py +42 -0
  507. brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
  508. brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
  509. brainscore_vision/models/resnet_50_v1/test.py +8 -0
  510. brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
  511. brainscore_vision/models/resnet_50_v2/model.py +33 -0
  512. brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
  513. brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
  514. brainscore_vision/models/resnet_50_v2/test.py +8 -0
  515. brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
  516. brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
  517. brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
  518. brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
  519. brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
  520. brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
  521. brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
  522. brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
  523. brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
  524. brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
  525. brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
  526. brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
  527. brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
  528. brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
  529. brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
  530. brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
  531. brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
  532. brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
  533. brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
  534. brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
  535. brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
  536. brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
  537. brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
  538. brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
  539. brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
  540. brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
  541. brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
  542. brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
  543. brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
  544. brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
  545. brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
  546. brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
  547. brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
  548. brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
  549. brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
  550. brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
  551. brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
  552. brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
  553. brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
  554. brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
  555. brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
  556. brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
  557. brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
  558. brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
  559. brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
  560. brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
  561. brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
  562. brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
  563. brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
  564. brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
  565. brainscore_vision/models/timm_models/__init__.py +193 -0
  566. brainscore_vision/models/timm_models/model.py +90 -0
  567. brainscore_vision/models/timm_models/model_configs.json +464 -0
  568. brainscore_vision/models/timm_models/requirements.txt +3 -0
  569. brainscore_vision/models/timm_models/test.py +0 -0
  570. brainscore_vision/models/vgg_16/__init__.py +7 -0
  571. brainscore_vision/models/vgg_16/model.py +52 -0
  572. brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
  573. brainscore_vision/models/vgg_16/requirements.txt +1 -0
  574. brainscore_vision/models/vgg_16/test.py +8 -0
  575. brainscore_vision/models/vgg_19/__init__.py +7 -0
  576. brainscore_vision/models/vgg_19/model.py +52 -0
  577. brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
  578. brainscore_vision/models/vgg_19/requirements.txt +1 -0
  579. brainscore_vision/models/vgg_19/test.py +8 -0
  580. brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
  581. brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
  582. brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
  583. brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
  584. brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
  585. brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
  586. brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
  587. brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
  588. brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
  589. brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
  590. brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
  591. brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
  592. brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
  593. brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
  594. brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
  595. brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
  596. brainscore_vision/models/voneresnet_50/__init__.py +7 -0
  597. brainscore_vision/models/voneresnet_50/model.py +37 -0
  598. brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
  599. brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
  600. brainscore_vision/models/voneresnet_50/test.py +8 -0
  601. brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
  602. brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
  603. brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
  604. brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
  605. brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
  606. brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
  607. brainscore_vision/models/voneresnet_50_1/model.py +68 -0
  608. brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
  609. brainscore_vision/models/voneresnet_50_1/test.py +7 -0
  610. brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
  611. brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
  612. brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
  613. brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
  614. brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
  615. brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
  616. brainscore_vision/models/voneresnet_50_3/model.py +66 -0
  617. brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
  618. brainscore_vision/models/voneresnet_50_3/test.py +7 -0
  619. brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
  620. brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
  621. brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
  622. brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
  623. brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
  624. brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
  625. brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
  626. brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
  627. brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
  628. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
  629. brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
  630. brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
  631. brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
  632. brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
  633. brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
  634. brainscore_vision/models/xception/__init__.py +7 -0
  635. brainscore_vision/models/xception/model.py +64 -0
  636. brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
  637. brainscore_vision/models/xception/requirements.txt +2 -0
  638. brainscore_vision/models/xception/test.py +8 -0
  639. brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
  640. brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
  641. brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
  642. brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
  643. brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
  644. brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
  645. brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
  646. brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
  647. brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
  648. brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
  649. brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
  650. brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
  651. brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
  652. brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
  653. brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
  654. brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
  655. brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
  656. brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
  657. brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
  658. brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
  659. brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
  660. brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
  661. brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
  662. brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
  663. brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
  664. brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
  665. brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
  666. brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
  667. brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
  668. brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
  669. brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
  670. brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
  671. brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
  672. brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
  673. brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
  674. brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
  675. brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
  676. brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
  677. brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
  678. brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
  679. brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
  680. brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
  681. brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
  682. brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
  683. brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
  684. brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
  685. brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
  686. brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
  687. brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
  688. brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
  689. brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
  690. brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
  691. brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
  692. brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
  693. brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
  694. brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
  695. brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
  696. brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
  697. brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
  698. brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
  699. brainscore_vision/submission/actions_helpers.py +2 -3
  700. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
  701. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
  702. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
  703. docs/source/index.rst +1 -0
  704. docs/source/modules/submission.rst +1 -1
  705. docs/source/modules/version_bumping.rst +43 -0
  706. tests/test_submission/test_actions_helpers.py +2 -6
  707. brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
  708. brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
  709. brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
  710. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
  711. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
  712. brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
  713. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
  714. brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
  715. /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
  716. /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
  717. /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
  718. /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
  719. /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
  720. /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
  721. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
  722. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,544 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from functools import reduce
import scipy.stats as stats

device = "cpu"


def gabor_kernel(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61):
    # ks x ks Gabor filter: a Gaussian envelope (sigma_x, sigma_y) modulating a
    # cosine grating with the given spatial frequency, orientation theta
    # (radians) and phase offset.
    w = ks // 2
    grid_val = torch.arange(-w, w + 1, dtype=torch.float)
    x, y = torch.meshgrid(grid_val, grid_val)
    rotx = x * np.cos(theta) + y * np.sin(theta)
    roty = -x * np.sin(theta) + y * np.cos(theta)
    g = torch.zeros(y.shape)
    g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
    g /= 2 * np.pi * sigma_x * sigma_y
    g *= torch.cos(2 * np.pi * frequency * rotx + offset)

    return g


def sample_dist(hist, bins, ns, scale='linear'):
    # Inverse-transform sampling: draw ns values distributed according to the
    # histogram `hist` over bin edges `bins`, interpolating in linear or log space.
    rand_sample = np.random.rand(ns)
    if scale == 'linear':
        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins)
    elif scale == 'log2':
        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins))
        rand_sample = 2 ** rand_sample
    elif scale == 'log10':
        rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins))
        rand_sample = 10 ** rand_sample
    return rand_sample

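# Editorial aside (not part of the packaged file): a quick worked example of the
# inverse-CDF sampling above. With hist = np.array([0.25, 0.75]) and
# bins = np.array([0., 1., 2.]), sample_dist(hist, bins, ns=10000) maps uniform
# draws through the cumulative histogram [0, 0.25, 1.0], so about 25% of the
# samples land in [0, 1) and about 75% in [1, 2).
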
def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0):
    # Generates a random sample of Gabor parameters for `features` channels
    np.random.seed(seed)

    phase_bins = np.array([0, 360])
    phase_dist = np.array([1])

    if rand_flag:
        print('Uniform gabor parameters')
        ori_bins = np.array([0, 180])
        ori_dist = np.array([1])

        nx_bins = np.array([0.1, 10 ** 0.2])
        nx_dist = np.array([1])

        ny_bins = np.array([0.1, 10 ** 0.2])
        ny_dist = np.array([1])

        # sf_bins = np.array([0.5, 8])
        # sf_dist = np.array([1])

        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1])

        sfmax_ind = np.where(sf_bins < sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]

        sf_bins = sf_bins[sfmin_ind:sfmax_ind + 1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]

        sf_dist = sf_dist / sf_dist.sum()
    else:
        print('Neuronal distributions gabor parameters')
        # DeValois 1982a
        ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
        ori_dist = np.array([66, 49, 77, 54])
        ori_dist = ori_dist / ori_dist.sum()

        # Schiller 1976
        cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])

        # Ringach 2002b
        nx_bins = np.logspace(-1, 0.2, 6, base=10)
        ny_bins = np.logspace(-1, 0.2, 6, base=10)
        n_joint_dist = np.array([[2., 0., 1., 0., 0.],
                                 [8., 9., 4., 1., 0.],
                                 [1., 2., 19., 17., 3.],
                                 [0., 0., 1., 7., 4.],
                                 [0., 0., 0., 0., 0.]])
        n_joint_dist = n_joint_dist / n_joint_dist.sum()
        nx_dist = n_joint_dist.sum(axis=1)
        nx_dist = nx_dist / nx_dist.sum()
        ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)

        # DeValois 1982b
        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12])

        sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]

        sf_bins = sf_bins[sfmin_ind:sfmax_ind + 1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]

        sf_dist = sf_dist / sf_dist.sum()

    phase = sample_dist(phase_dist, phase_bins, features)
    ori = sample_dist(ori_dist, ori_bins, features)
    ori[ori < 0] = ori[ori < 0] + 180

    if rand_flag:
        sf = sample_dist(sf_dist, sf_bins, features, scale='log2')
        nx = sample_dist(nx_dist, nx_bins, features, scale='log10')
        ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
    else:
        # Draw (nx, sf) through a Gaussian copula with correlation sf_corr,
        # then draw ny conditionally on the nx bin.
        samps = np.random.multivariate_normal([0, 0], cov_mat, features)
        samps_cdf = stats.norm.cdf(samps)

        nx = np.interp(samps_cdf[:, 0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
        nx = 10 ** nx

        ny_samp = np.random.rand(features)
        ny = np.zeros(features)
        for samp_ind, nx_samp in enumerate(nx):
            bin_id = np.argwhere(nx_bins < nx_samp)[-1]
            ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
                                     np.log10(ny_bins))
        ny = 10 ** ny

        sf = np.interp(samps_cdf[:, 1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins))
        sf = 2 ** sf

    return sf, ori, phase, nx, ny


class Identity(nn.Module):
    def forward(self, x):
        return x


class GFB(nn.Module):
    # Fixed (non-trainable) Gabor filter bank applied as a strided convolution.
    def __init__(self, in_channels, out_channels, kernel_size, stride=4):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size)
        self.stride = (stride, stride)
        self.padding = (kernel_size // 2, kernel_size // 2)

        # Param instantiations
        self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))

    def forward(self, x):
        return F.conv2d(x, self.weight, None, self.stride, self.padding)

    def initialize(self, sf, theta, sigx, sigy, phase):
        # Each output channel gets one Gabor kernel on a randomly chosen input channel.
        random_channel = torch.randint(0, self.in_channels, (self.out_channels,))
        for i in range(self.out_channels):
            self.weight[i, random_channel[i]] = gabor_kernel(frequency=sf[i], sigma_x=sigx[i], sigma_y=sigy[i],
                                                             theta=theta[i], offset=phase[i], ks=self.kernel_size[0])
        self.weight = nn.Parameter(self.weight, requires_grad=False)


class VOneBlock(nn.Module):
+ def __init__(self, sf, theta, sigx, sigy, phase,
165
+ k_exc=25, noise_mode=None, noise_scale=1, noise_level=1,
166
+ simple_channels=128, complex_channels=128, ksize=25, stride=4, input_size=224):
167
+ super().__init__()
168
+
169
+ self.in_channels = 3
170
+
171
+ self.simple_channels = simple_channels
172
+ self.complex_channels = complex_channels
173
+ self.out_channels = simple_channels + complex_channels
174
+ self.stride = stride
175
+ self.input_size = input_size
176
+
177
+ self.sf = sf
178
+ self.theta = theta
179
+ self.sigx = sigx
180
+ self.sigy = sigy
181
+ self.phase = phase
182
+ self.k_exc = k_exc
183
+
184
+ self.set_noise_mode(noise_mode, noise_scale, noise_level)
185
+ self.fixed_noise = None
186
+
187
+ self.simple_conv_q0 = GFB(self.in_channels, self.out_channels, ksize, stride)
188
+ self.simple_conv_q1 = GFB(self.in_channels, self.out_channels, ksize, stride)
189
+ self.simple_conv_q0.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
190
+ phase=self.phase)
191
+ self.simple_conv_q1.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
192
+ phase=self.phase + np.pi / 2)
193
+
194
+ self.simple = nn.ReLU(inplace=True)
195
+ self.complex = Identity()
196
+ self.gabors = Identity()
197
+ self.noise = nn.ReLU(inplace=True)
198
+ self.output = Identity()
199
+
200
+ def forward(self, x):
201
+ # Gabor activations [Batch, out_channels, H/stride, W/stride]
202
+ x = self.gabors_f(x)
203
+ # Noise [Batch, out_channels, H/stride, W/stride]
204
+ x = self.noise_f(x)
205
+ # V1 Block output: (Batch, out_channels, H/stride, W/stride)
206
+ x = self.output(x)
207
+ return x
208
+
209
+ def gabors_f(self, x):
210
+ s_q0 = self.simple_conv_q0(x)
211
+ s_q1 = self.simple_conv_q1(x)
212
+ c = self.complex(torch.sqrt(s_q0[:, self.simple_channels:, :, :] ** 2 +
213
+ s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2))
214
+ s = self.simple(s_q0[:, 0:self.simple_channels, :, :])
215
+ return self.gabors(self.k_exc * torch.cat((s, c), 1))
216
+
217
+ def noise_f(self, x):
218
+ if self.noise_mode == 'neuronal':
219
+ eps = 10e-5
220
+ x *= self.noise_scale
221
+ x += self.noise_level
222
+ if self.fixed_noise is not None:
223
+ x += self.fixed_noise * torch.sqrt(F.relu(x.clone()) + eps)
224
+ else:
225
+ x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * \
226
+ torch.sqrt(F.relu(x.clone()) + eps)
227
+ x -= self.noise_level
228
+ x /= self.noise_scale
229
+ if self.noise_mode == 'gaussian':
230
+ if self.fixed_noise is not None:
231
+ x += self.fixed_noise * self.noise_scale
232
+ else:
233
+ x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * self.noise_scale
234
+ return self.noise(x)
235
+
236
+ def set_noise_mode(self, noise_mode=None, noise_scale=1, noise_level=1):
237
+ self.noise_mode = noise_mode
238
+ self.noise_scale = noise_scale
239
+ self.noise_level = noise_level
240
+
241
+ def fix_noise(self, batch_size=256, seed=None):
242
+ noise_mean = torch.zeros(batch_size, self.out_channels, int(self.input_size / self.stride),
243
+ int(self.input_size / self.stride))
244
+ if seed:
245
+ torch.manual_seed(seed)
246
+ if self.noise_mode:
247
+ self.fixed_noise = torch.distributions.normal.Normal(noise_mean, scale=1).rsample().to(device)
248
+
249
+ def unfix_noise(self):
250
+ self.fixed_noise = None
251
+
252
+
253
+ def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
254
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
255
+ padding=dilation, groups=groups, bias=False, dilation=dilation)
256
+
257
+
258
+ def conv1x1(in_planes, out_planes, stride=1):
259
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
260
+
261
+
262
+ class Bottleneck(nn.Module):
263
+ expansion = 4
264
+ __constants__ = ['downsample']
265
+
266
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
267
+ base_width=64, dilation=1, norm_layer=None):
268
+ super(Bottleneck, self).__init__()
269
+ if norm_layer is None:
270
+ norm_layer = nn.BatchNorm2d
271
+ width = int(planes * (base_width / 64.)) * groups
272
+ # Both self.conv2 and self.downsample layers downsample the input when stride != 1
273
+ self.conv1 = conv1x1(inplanes, width)
274
+ self.bn1 = norm_layer(width)
275
+ self.conv2 = conv3x3(width, width, stride, groups, dilation)
276
+ self.bn2 = norm_layer(width)
277
+ self.conv3 = conv1x1(width, planes * self.expansion)
278
+ self.bn3 = norm_layer(planes * self.expansion)
279
+ self.relu = nn.ReLU(inplace=True) # inplace=True
280
+ self.downsample = downsample
281
+ self.stride = stride
282
+
283
+ def forward(self, x):
284
+ identity = x
285
+
286
+ out = self.conv1(x)
287
+ out = self.bn1(out)
288
+ out = self.relu(out)
289
+
290
+ out = self.conv2(out)
291
+ out = self.bn2(out)
292
+ out = self.relu(out)
293
+
294
+ out = self.conv3(out)
295
+ out = self.bn3(out)
296
+
297
+ if self.downsample is not None:
298
+ identity = self.downsample(x)
299
+
300
+ out += identity
301
+ out = self.relu(out)
302
+
303
+ return out
304
+
305
+
306
+ def VOneNet(sf_corr=0.75, sf_max=9, sf_min=0, rand_param=False, gabor_seed=0,
307
+ simple_channels=256, complex_channels=256,
308
+ noise_mode='neuronal', noise_scale=0.35, noise_level=0.07, k_exc=25,
309
+ model_arch='resnet50', image_size=224, visual_degrees=8, ksize=25, stride=4):
310
+ out_channels = simple_channels + complex_channels
311
+
312
+ sf, theta, phase, nx, ny = generate_gabor_param(out_channels, gabor_seed, rand_param, sf_corr, sf_max, sf_min)
313
+
314
+ gabor_params = {'simple_channels': simple_channels, 'complex_channels': complex_channels, 'rand_param': rand_param,
315
+ 'gabor_seed': gabor_seed, 'sf_max': sf_max, 'sf_corr': sf_corr, 'sf': sf.copy(),
316
+ 'theta': theta.copy(), 'phase': phase.copy(), 'nx': nx.copy(), 'ny': ny.copy()}
317
+ arch_params = {'k_exc': k_exc, 'arch': model_arch, 'ksize': ksize, 'stride': stride}
318
+
319
+ # Conversions
320
+ ppd = image_size / visual_degrees
321
+
322
+ sf = sf / ppd
323
+ sigx = nx / sf
324
+ sigy = ny / sf
325
+ theta = theta / 180 * np.pi
326
+ phase = phase / 180 * np.pi
327
+
328
+ vone_block = VOneBlock(sf=sf, theta=theta, sigx=sigx, sigy=sigy, phase=phase,
329
+ k_exc=k_exc, noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level,
330
+ simple_channels=simple_channels, complex_channels=complex_channels,
331
+ ksize=ksize, stride=stride, input_size=image_size)
332
+
333
+ bottleneck = nn.Conv2d(out_channels, 64, kernel_size=1, stride=1, bias=False)
334
+ nn.init.kaiming_normal_(bottleneck.weight, mode='fan_out', nonlinearity='relu')
335
+
336
+ return vone_block, bottleneck
337
+
338
+
339
+ class SKConv(nn.Module):
340
+ def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32, groups=32):
341
+
342
+ super(SKConv, self).__init__()
343
+ d = max(in_channels // r, L)
344
+ self.M = M
345
+ self.out_channels = out_channels
346
+ self.conv = nn.ModuleList()
347
+ for i in range(M):
348
+ conv1 = nn.Conv2d(in_channels, out_channels, 3, stride, padding=1 + i, dilation=1 + i, groups=groups,
349
+ bias=False)
350
+ init.kaiming_normal_(conv1.weight)
351
+ self.conv.append(nn.Sequential(conv1,
352
+ nn.BatchNorm2d(out_channels),
353
+ nn.ReLU(inplace=True)))
354
+ self.global_pool = nn.AdaptiveAvgPool2d(1)
355
+ conv_fc = nn.Conv2d(out_channels, d, 1, bias=False)
356
+ init.normal_(conv_fc.weight, std=0.01)
357
+ self.fc1 = nn.Sequential(conv_fc,
358
+ nn.BatchNorm2d(d),
359
+ nn.ReLU(inplace=True))
360
+ self.fc2 = nn.Conv2d(d, out_channels * M, 1, 1, bias=False)
361
+ init.normal_(self.fc2.weight, std=0.01)
362
+ self.softmax = nn.Softmax(dim=1)
363
+
364
+ def forward(self, input):
365
+ batch_size = input.size(0)
366
+ output = []
367
+ for i, conv in enumerate(self.conv):
368
+ output.append(conv(input))
369
+ U = reduce(lambda x, y: x + y, output)
370
+ s = self.global_pool(U)
371
+ z = self.fc1(s)
372
+ a_b = self.fc2(z)
373
+ a_b = a_b.reshape(batch_size, self.M, self.out_channels, -1)
374
+ a_b = self.softmax(a_b)
375
+ a_b = list(a_b.chunk(self.M, dim=1))
376
+ a_b = list(map(lambda x: x.reshape(batch_size, self.out_channels, 1, 1), a_b))
377
+ V = list(map(lambda x, y: x * y, output, a_b))
378
+ V = reduce(lambda x, y: x + y, V)
379
+ return V
380
+
381
+
382
+ class GRCL(nn.Module):
383
+ def __init__(self, inplanes, planes, downsample=True, iter=3, SKconv=True, expansion=2):
384
+ super(GRCL, self).__init__()
385
+
386
+ self.iter = iter
387
+ self.expansion = expansion
388
+ # feed-forward part
389
+ self.add_module('bn_f', nn.BatchNorm2d(inplanes))
390
+ self.add_module('relu_f', nn.ReLU(inplace=True))
391
+ conv_f = nn.Conv2d(inplanes, int(planes * self.expansion), kernel_size=3, stride=1, padding=1, bias=False,
392
+ groups=32)
393
+ init.kaiming_normal_(conv_f.weight)
394
+ self.add_module('conv_f', conv_f)
395
+
396
+ self.add_module('bn_g_f', nn.BatchNorm2d(inplanes))
397
+ self.add_module('relu_g_f', nn.ReLU(inplace=True))
398
+ conv_g_f = nn.Conv2d(inplanes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=True,
399
+ groups=32)
400
+ init.normal_(conv_g_f.weight, std=0.01)
401
+ self.add_module('conv_g_f', conv_g_f)
402
+ self.conv_g_r = nn.Conv2d(int(planes * self.expansion), int(planes * self.expansion), kernel_size=1, stride=1,
403
+ padding=0, bias=False, groups=32)
404
+ self.add_module('sig', nn.Sigmoid())
405
+
406
+ # recurrent part
407
+ for i in range(0, self.iter):
408
+ layers = []
409
+ layers_g_bn = []
410
+
411
+ layers.append(nn.BatchNorm2d(planes * self.expansion))
412
+ layers.append(nn.ReLU(inplace=True))
413
+ conv_1 = nn.Conv2d(int(planes * self.expansion), planes, kernel_size=1, stride=1, padding=0, bias=False)
414
+ init.kaiming_normal_(conv_1.weight)
415
+ layers.append(conv_1)
416
+
417
+ layers.append(nn.BatchNorm2d(planes))
418
+ layers.append(nn.ReLU(inplace=True))
419
+
420
+ if SKconv:
421
+ layers.append(SKConv(planes, planes))
422
+ else:
423
+ layers.append(nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False))
424
+ layers.append(nn.BatchNorm2d(planes))
425
+ layers.append(nn.ReLU(inplace=True))
426
+
427
+ conv_2 = nn.Conv2d(planes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
428
+ init.kaiming_normal_(conv_2.weight)
429
+ layers.append(conv_2)
430
+ layers_g_bn.append(nn.BatchNorm2d(int(planes * self.expansion)))
431
+
432
+ layers_g_bn.append(nn.ReLU(inplace=True))
433
+
434
+ self.add_module('iter_' + str(i + 1), nn.Sequential(*layers))
435
+ self.add_module('iter_g_' + str(i + 1), nn.Sequential(*layers_g_bn))
436
+
437
+ self.downsample = downsample
438
+ if self.downsample:
439
+ self.add_module('d_bn', nn.BatchNorm2d(planes * self.expansion))
440
+ self.add_module('d_relu', nn.ReLU(inplace=True))
441
+ d_conv = nn.Conv2d(int(planes * self.expansion), int(planes * self.expansion), kernel_size=1, stride=1,
442
+ padding=0, bias=False)
443
+ init.kaiming_normal_(d_conv.weight)
444
+ self.add_module('d_conv', d_conv)
445
+ self.add_module('d_ave', nn.AvgPool2d((2, 2), stride=2))
446
+
447
+ self.add_module('d_bn_1', nn.BatchNorm2d(planes * self.expansion))
448
+ self.add_module('d_relu_1', nn.ReLU(inplace=True))
449
+ d_conv_1 = nn.Conv2d(int(planes * self.expansion), planes, kernel_size=1, stride=1, padding=0,
450
+ bias=False)
451
+ init.kaiming_normal_(d_conv_1.weight)
452
+ self.add_module('d_conv_1', d_conv_1)
453
+
454
+ self.add_module('d_bn_3', nn.BatchNorm2d(planes))
455
+ self.add_module('d_relu_3', nn.ReLU(inplace=True))
456
+
457
+ if SKconv:
458
+ d_conv_3 = SKConv(planes, planes, stride=2)
459
+ self.add_module('d_conv_3', d_conv_3)
460
+ else:
461
+ d_conv_3 = nn.Conv2d(planes, planes, kernel_size=3, stride=2, padding=1, bias=False)
462
+ init.kaiming_normal_(d_conv_3.weight)
463
+ self.add_module('d_conv_3', d_conv_3)
464
+
465
+ d_conv_1e = nn.Conv2d(planes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
466
+ init.kaiming_normal_(d_conv_1e.weight)
467
+ self.add_module('d_conv_1e', d_conv_1e)
468
+
469
+ def forward(self, x):
470
+ # feed-forward
471
+ x_bn = self.bn_f(x)
472
+ x_act = self.relu_f(x_bn)
473
+ x_s = self.conv_f(x_act)
474
+
475
+ x_g_bn = self.bn_g_f(x)
476
+ x_g_act = self.relu_g_f(x_g_bn)
477
+ x_g_s = self.conv_g_f(x_g_act)
478
+
479
+ # recurrent
480
+ for i in range(0, self.iter):
481
+ x_g_r = self.conv_g_r(self.__dict__['_modules']["iter_g_%s" % str(i + 1)](x_s))
482
+ x_s = self.__dict__['_modules']["iter_%s" % str(i + 1)](x_s) * torch.sigmoid(x_g_r + x_g_s) + x_s
483
+
484
+ if self.downsample:
485
+ x_s_1 = self.d_conv(self.d_ave(self.d_relu(self.d_bn(x_s))))
486
+ x_s_2 = self.d_conv_1e(
487
+ self.d_conv_3(self.d_relu_3(self.d_bn_3(self.d_conv_1(self.d_relu_1(self.d_bn_1(x_s)))))))
488
+ x_s = x_s_1 + x_s_2
489
+
490
+ return x_s
491
+
492
+
493
+ class GRCNNBackEnd(nn.Module):
494
+ def __init__(self, iters, maps, SKconv, expansion, num_classes):
495
+ """ Args:
496
+ iters:iterations.
497
+ num_classes: number of classes
498
+ """
499
+ super(GRCNNBackEnd, self).__init__()
500
+ self.iters = iters
501
+ self.maps = maps
502
+ self.num_classes = num_classes
503
+ self.expansion = expansion
504
+
505
+ self.layer1 = GRCL(64, self.maps[0], True, self.iters[0], SKconv, self.expansion)
506
+ self.layer2 = GRCL(self.maps[0] * self.expansion, self.maps[1], True, self.iters[1], SKconv, self.expansion)
507
+ self.layer3 = GRCL(self.maps[1] * self.expansion, self.maps[2], True, self.iters[2], SKconv, self.expansion)
508
+ self.layer4 = GRCL(self.maps[2] * self.expansion, self.maps[3], False, self.iters[3], SKconv, self.expansion)
509
+
510
+ self.lastact = nn.Sequential(nn.BatchNorm2d(self.maps[3] * self.expansion), nn.ReLU(inplace=True))
511
+ self.avgpool = nn.AvgPool2d(7)
512
+ self.classifier = nn.Linear(self.maps[3] * self.expansion, num_classes)
513
+
514
+ for m in self.modules():
515
+ if isinstance(m, nn.Conv2d):
516
+ if m.bias is not None:
517
+ init.zeros_(m.bias)
518
+ elif isinstance(m, nn.BatchNorm2d):
519
+ init.ones_(m.weight)
520
+ init.zeros_(m.bias)
521
+ elif isinstance(m, nn.Linear):
522
+ init.kaiming_normal_(m.weight)
523
+ init.zeros_(m.bias)
524
+
525
+ def forward(self, x):
526
+
527
+ x = self.layer1(x)
528
+ x = self.layer2(x)
529
+ x = self.layer3(x)
530
+ x = self.layer4(x)
531
+
532
+ x = self.lastact(x)
533
+ x = self.avgpool(x)
534
+ x = x.view(x.size(0), -1)
535
+ return self.classifier(x)
536
+
537
+
538
+ def grcnn55BackEnd(num_classes=1000):
539
+ """
540
+ Args:
541
+ num_classes (uint): number of classes
542
+ """
543
+ model = GRCNNBackEnd([3, 3, 4, 3], [64, 128, 256, 512], SKconv=False, expansion=4, num_classes=num_classes)
544
+ return model
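Taken together, the helper module above defines a fixed-Gabor V1 front end (VOneBlock) that feeds a GRCNN back end. As a quick smoke test, the front end can be exercised on its own; this is a minimal sketch, assuming the module is importable as vongrcnn_helpers (mirroring the import in the model file below) and using only names defined above.

# Sketch: run one random image through the VOne front end defined above.
import torch
from vongrcnn_helpers import VOneNet  # import path is an assumption

vone_block, bottleneck = VOneNet()  # 512 fixed Gabor channels + 1x1 bottleneck to 64
x = torch.rand(1, 3, 224, 224)      # one 224x224 RGB image in [0, 1]
with torch.no_grad():
    v1 = vone_block(x)              # stride 4 -> shape (1, 512, 56, 56)
    out = bottleneck(v1)            # shape (1, 64, 56, 56)
print(v1.shape, out.shape)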
@@ -0,0 +1,122 @@
+ from brainscore_vision.model_helpers.check_submission import check_models
+ import torch
+ import functools
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from brainscore_vision.model_helpers.s3 import load_weight_file
+ from .helpers.vongrcnn_helpers import VOneNet, grcnn55BackEnd
+ import torch.nn as nn
+ from collections import OrderedDict
+
+ device = "cpu"
+ model_identifier = 'vonegrcnn_62e_nobn'
+
+
+ ### DEFINE YOUR CUSTOM MODEL HERE
+
+ # get_model actually builds the model. For a custom model, this is just linked to the
+ # model defined above.
+ def get_model(name):
+     """
+     This method fetches an instance of a base model. The instance has to be callable and return an xarray object
+     containing activations. There are standard wrapper implementations for common libraries, like PyTorch and
+     Keras. Check out the examples folder to see more. For custom implementations, check out the implementation of
+     the wrappers.
+     :param name: the name of the model to fetch
+     :return: the model instance
+     """
+     assert name == 'vonegrcnn_62e_nobn'
+     # link the custom model to the wrapper object (activations_model below):
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     vone_block, bottleneck = VOneNet()
+     model_back_end = grcnn55BackEnd()
+
+     model = nn.Sequential(OrderedDict([
+         ('vone_block', vone_block),
+         ('bottleneck', bottleneck),
+         ('model', model_back_end),
+     ]))
+
+     model = nn.Sequential(OrderedDict([('module', model)]))
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                     relative_path="vonegrcnn_62e/model_best.pth",
+                                     version_id="null",
+                                     sha1="66f5319888ebd146565fb45144afa92d8a2bef3b")
+     checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
+     model.load_state_dict(checkpoint['state_dict'], strict=True)
+     model = model.to(device)
+
+     # get an activations model from the PytorchWrapper
+     activations_model = PytorchWrapper(identifier=model_identifier, model=model,
+                                        preprocessing=preprocessing)
+     wrapper = activations_model
+     wrapper.image_size = 224
+     return wrapper
+
+
+ # get_layers tells the code which layers to consider. If you are submitting a custom
+ # model, you will most likely need to change this method's return values.
+ def get_layers(name):
+     """
+     This method returns a list of string layer names to consider per model. The benchmarks map brain regions to
+     layers and use this list as the set of candidate layers. The list doesn't have to contain all layers; the fewer
+     layers, the faster the benchmark process runs. Additionally, the given layers have to produce an activations
+     vector of at least size 25! The layer names are passed back to the model instance and have to be resolved there.
+     For a PyTorch model, layer names are dot-concatenated per module, e.g. "features.2".
+     :param name: the name of the model to return the layers for
+     :return: a list of strings containing all layers that should be considered as brain areas.
+     """
+     # quick check to make sure the model is the correct one:
+     assert name == 'vonegrcnn_62e_nobn'
+     all_layers = ['module',
+                   'module.vone_block',
+                   'module.vone_block.simple_conv_q0',
+                   'module.vone_block.simple_conv_q1',
+                   'module.vone_block.simple',
+                   'module.vone_block.complex',
+                   'module.vone_block.gabors',
+                   'module.vone_block.noise',
+                   'module.vone_block.output',
+                   'module.bottleneck',
+                   'module.model',
+                   'module.model.layer1',
+                   'module.model.layer2',
+                   'module.model.layer3',
+                   'module.model.layer4',
+                   'module.model.layer1.conv_f',
+                   'module.model.layer2.conv_f',
+                   'module.model.layer3.conv_f',
+                   'module.model.layer4.conv_f',
+                   'module.model.layer1.d_conv_1e',
+                   'module.model.layer2.d_conv_1e',
+                   'module.model.layer3.d_conv_1e',
+                   'module.model.layer1.iter_g_3.1',
+                   'module.model.layer2.iter_g_3.1',
+                   'module.model.layer3.iter_g_4.1',
+                   'module.model.layer4.iter_g_3.1',
+                   'module.model.lastact',
+                   'module.model.lastact.0',
+                   'module.model.lastact.1',
+                   'module.model.avgpool',
+                   'module.model.classifier']
+     # returns the layers you want to consider
+     return all_layers
+
+
+ # BibTeX method. For a custom model submission, you can either put your own BibTeX if your
+ # model has been published, or keep the empty return value if there is no publication to refer to.
+ def get_bibtex(model_identifier):
+     """
+     A method returning the BibTeX reference of the requested model as a string.
+     """
+     # from pytorch.py:
+     return ''
+
+
+ # Main method: when submitting a custom model, you should not have to touch this.
+ if __name__ == '__main__':
+     # Use this method to ensure the correctness of the BaseModel implementations.
+     # It executes a mock run of brain-score benchmarks.
+     check_models.check_base_models(__name__)
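Every dot-concatenated name returned by get_layers has to resolve to a submodule of the wrapped network. A minimal consistency sketch, assuming the helper module and the model file above are importable under the names used here (both import paths are assumptions), rebuilds the same nn.Sequential nesting as get_model without downloading the weights:

# Sketch: every name from get_layers should resolve via named_modules().
import torch.nn as nn
from collections import OrderedDict
from vongrcnn_helpers import VOneNet, grcnn55BackEnd  # import path is an assumption
from model import get_layers                          # import path is an assumption

vone_block, bottleneck = VOneNet()
net = nn.Sequential(OrderedDict([('module', nn.Sequential(OrderedDict([
    ('vone_block', vone_block),
    ('bottleneck', bottleneck),
    ('model', grcnn55BackEnd()),
])))]))

module_names = dict(net.named_modules())
for layer in get_layers('vonegrcnn_62e_nobn'):
    assert layer in module_names, 'unresolved layer: ' + layer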
@@ -0,0 +1,6 @@
+ {
+     "V1": "module.model.layer1.conv_f",
+     "V2": "module.model.layer3.conv_f",
+     "V4": "module.model.layer2",
+     "IT": "module.model.layer4.conv_f"
+ }
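The JSON above commits one layer per brain region (V1, V2, V4, IT), using the same dot-paths as get_layers. A sketch of how such a region-to-layer map is typically consumed; the file name passed to open is an assumption:

# Sketch: look up the committed layer for a brain region.
import json

with open('region_layer_map/vonegrcnn_62e_nobn.json') as f:  # file name is an assumption
    region_layer_map = json.load(f)

print(region_layer_map['IT'])  # -> 'module.model.layer4.conv_f'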
@@ -0,0 +1,3 @@
+ torch
+ numpy
+ scipy
@@ -0,0 +1,8 @@
+ import pytest
+ import brainscore_vision
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('vonegrcnn_62e_nobn')
+     assert model.identifier == 'vonegrcnn_62e_nobn'
@@ -0,0 +1,7 @@
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from brainscore_vision import model_registry
+ from .model import get_layers, get_model
+
+
+ model_registry['voneresnet-50'] = \
+     lambda: ModelCommitment(identifier='voneresnet-50', activations_model=get_model('voneresnet-50'),
+                             layers=get_layers('voneresnet-50'))
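Note that the registry entry is a zero-argument lambda, so the ModelCommitment (including the weight download inside get_model) is only constructed when the entry is actually invoked, e.g. through brainscore_vision.load_model as in the test above. A short sketch:

# Sketch: loading the registered model triggers the lambda above.
import brainscore_vision

model = brainscore_vision.load_model('voneresnet-50')
print(model.identifier)  # 'voneresnet-50'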