brainscore-vision 2.2.4__py3-none-any.whl → 2.2.5__py3-none-any.whl

Files changed (722)
  1. brainscore_vision/data/baker2022/__init__.py +10 -10
  2. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
  3. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
  4. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
  5. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
  6. brainscore_vision/data/barbumayo2019/__init__.py +3 -3
  7. brainscore_vision/data/bashivankar2019/__init__.py +10 -10
  8. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
  9. brainscore_vision/data/bmd2024/__init__.py +20 -20
  10. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
  11. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
  12. brainscore_vision/data/bracci2019/__init__.py +5 -5
  13. brainscore_vision/data/bracci2019/data_packaging.py +1 -1
  14. brainscore_vision/data/cadena2017/__init__.py +5 -5
  15. brainscore_vision/data/cichy2019/__init__.py +5 -5
  16. brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
  17. brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
  18. brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
  19. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
  20. brainscore_vision/data/david2004/__init__.py +5 -5
  21. brainscore_vision/data/deng2009/__init__.py +3 -3
  22. brainscore_vision/data/ferguson2024/__init__.py +112 -112
  23. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
  24. brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
  25. brainscore_vision/data/geirhos2021/__init__.py +85 -85
  26. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
  27. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
  28. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
  29. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
  30. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
  31. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
  32. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
  33. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
  34. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
  35. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
  36. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
  37. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
  38. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
  39. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
  40. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
  41. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
  42. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
  43. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
  44. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
  45. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
  46. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
  47. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
  48. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
  49. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
  50. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
  51. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
  52. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
  53. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
  54. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
  55. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
  56. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
  57. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
  58. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
  59. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
  60. brainscore_vision/data/hebart2023/__init__.py +5 -5
  61. brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
  62. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
  63. brainscore_vision/data/hendrycks2019/__init__.py +12 -12
  64. brainscore_vision/data/igustibagus2024/__init__.py +5 -5
  65. brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
  66. brainscore_vision/data/islam2021/__init__.py +3 -3
  67. brainscore_vision/data/kar2018/__init__.py +7 -7
  68. brainscore_vision/data/kar2019/__init__.py +5 -5
  69. brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
  70. brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
  71. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
  72. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
  73. brainscore_vision/data/majajhong2015/__init__.py +23 -23
  74. brainscore_vision/data/malania2007/__init__.py +77 -77
  75. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
  76. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
  77. brainscore_vision/data/maniquet2024/__init__.py +11 -11
  78. brainscore_vision/data/marques2020/__init__.py +30 -30
  79. brainscore_vision/data/rajalingham2018/__init__.py +10 -10
  80. brainscore_vision/data/rajalingham2020/__init__.py +5 -5
  81. brainscore_vision/data/rust2012/__init__.py +7 -7
  82. brainscore_vision/data/sanghavi2020/__init__.py +19 -19
  83. brainscore_vision/data/scialom2024/__init__.py +110 -110
  84. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
  85. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
  86. brainscore_vision/data/seibert2019/__init__.py +2 -2
  87. brainscore_vision/data/zhang2018/__init__.py +5 -5
  88. brainscore_vision/data_helpers/s3.py +25 -6
  89. brainscore_vision/model_helpers/activations/pytorch.py +34 -12
  90. brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
  91. brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
  92. brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
  93. brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
  94. brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
  95. brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
  96. brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
  97. brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
  98. brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
  99. brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
  100. brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
  101. brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
  102. brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
  103. brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
  104. brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
  105. brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
  106. brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
  107. brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
  108. brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
  109. brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
  110. brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
  111. brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
  112. brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
  113. brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
  114. brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
  115. brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
  116. brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
  117. brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
  118. brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
  119. brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
  120. brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
  121. brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
  122. brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
  123. brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
  124. brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
  125. brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
  126. brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
  127. brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
  128. brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
  129. brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
  130. brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
  131. brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
  132. brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
  133. brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
  134. brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
  135. brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
  136. brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
  137. brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
  138. brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
  139. brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
  140. brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
  141. brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
  142. brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
  143. brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
  144. brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
  145. brainscore_vision/models/ReAlnet/__init__.py +64 -0
  146. brainscore_vision/models/ReAlnet/model.py +237 -0
  147. brainscore_vision/models/ReAlnet/requirements.txt +7 -0
  148. brainscore_vision/models/ReAlnet/test.py +0 -0
  149. brainscore_vision/models/ReAlnet/weights.json +26 -0
  150. brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
  151. brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
  152. brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
  153. brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
  154. brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
  155. brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
  156. brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
  157. brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
  158. brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
  159. brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
  160. brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
  161. brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
  162. brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
  163. brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
  164. brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
  165. brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
  166. brainscore_vision/models/VOneCORnet_S/model.py +25 -0
  167. brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
  168. brainscore_vision/models/VOneCORnet_S/test.py +8 -0
  169. brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
  170. brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
  171. brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
  172. brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
  173. brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
  174. brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
  175. brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
  176. brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
  177. brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
  178. brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
  179. brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
  180. brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
  181. brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
  182. brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
  183. brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
  184. brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
  185. brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
  186. brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
  187. brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
  188. brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
  189. brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
  190. brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
  191. brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
  192. brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
  193. brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
  194. brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
  195. brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
  196. brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
  197. brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
  198. brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
  199. brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
  200. brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
  201. brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
  202. brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
  203. brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
  204. brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
  205. brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
  206. brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
  207. brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
  208. brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
  209. brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
  210. brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
  211. brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
  212. brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
  213. brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
  214. brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
  215. brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
  216. brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
  217. brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
  218. brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
  219. brainscore_vision/models/antialiased-r50/__init__.py +7 -0
  220. brainscore_vision/models/antialiased-r50/model.py +62 -0
  221. brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
  222. brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
  223. brainscore_vision/models/antialiased-r50/test.py +8 -0
  224. brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
  225. brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
  226. brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
  227. brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
  228. brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
  229. brainscore_vision/models/cornet_s/model.py +2 -2
  230. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
  231. brainscore_vision/models/densenet_121/__init__.py +7 -0
  232. brainscore_vision/models/densenet_121/model.py +63 -0
  233. brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
  234. brainscore_vision/models/densenet_121/requirements.txt +1 -0
  235. brainscore_vision/models/densenet_121/test.py +8 -0
  236. brainscore_vision/models/densenet_169/__init__.py +7 -0
  237. brainscore_vision/models/densenet_169/model.py +63 -0
  238. brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
  239. brainscore_vision/models/densenet_169/requirements.txt +1 -0
  240. brainscore_vision/models/densenet_169/test.py +9 -0
  241. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
  242. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
  243. brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
  244. brainscore_vision/models/densenet_201/test.py +8 -0
  245. brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
  246. brainscore_vision/models/efficientnet_b0/model.py +45 -0
  247. brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
  248. brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
  249. brainscore_vision/models/efficientnet_b0/test.py +8 -0
  250. brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
  251. brainscore_vision/models/efficientnet_b7/model.py +61 -0
  252. brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
  253. brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
  254. brainscore_vision/models/efficientnet_b7/test.py +9 -0
  255. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
  256. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
  257. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
  258. brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
  259. brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
  260. brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
  261. brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
  262. brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
  263. brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
  264. brainscore_vision/models/evresnet_50_1/model.py +62 -0
  265. brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
  266. brainscore_vision/models/evresnet_50_1/test.py +8 -0
  267. brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
  268. brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
  269. brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
  270. brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
  271. brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
  272. brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
  273. brainscore_vision/models/evresnet_50_4/model.py +67 -0
  274. brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
  275. brainscore_vision/models/evresnet_50_4/test.py +8 -0
  276. brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
  277. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
  278. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
  279. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
  280. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
  281. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
  282. brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
  283. brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
  284. brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
  285. brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
  286. brainscore_vision/models/grcnn/__init__.py +7 -0
  287. brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
  288. brainscore_vision/models/grcnn/model.py +54 -0
  289. brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
  290. brainscore_vision/models/grcnn/requirements.txt +2 -0
  291. brainscore_vision/models/grcnn/test.py +9 -0
  292. brainscore_vision/models/grcnn_109/__init__.py +5 -0
  293. brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
  294. brainscore_vision/models/grcnn_109/model.py +53 -0
  295. brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
  296. brainscore_vision/models/grcnn_109/requirements.txt +2 -0
  297. brainscore_vision/models/grcnn_109/test.py +9 -0
  298. brainscore_vision/models/hmax/model.py +2 -2
  299. brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
  300. brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
  301. brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
  302. brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
  303. brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
  304. brainscore_vision/models/inception_v1/__init__.py +7 -0
  305. brainscore_vision/models/inception_v1/model.py +67 -0
  306. brainscore_vision/models/inception_v1/requirements.txt +1 -0
  307. brainscore_vision/models/inception_v1/test.py +8 -0
  308. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
  309. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
  310. brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
  311. brainscore_vision/models/inception_v3/test.py +8 -0
  312. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
  313. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
  314. brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
  315. brainscore_vision/models/inception_v4/test.py +8 -0
  316. brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
  317. brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
  318. brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
  319. brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
  320. brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
  321. brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
  322. brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
  323. brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
  324. brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
  325. brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
  326. brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
  327. brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
  328. brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
  329. brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
  330. brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
  331. brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
  332. brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
  333. brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
  334. brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
  335. brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
  336. brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
  337. brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
  338. brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
  339. brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
  340. brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
  341. brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
  342. brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
  343. brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
  344. brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
  345. brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
  346. brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
  347. brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
  348. brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
  349. brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
  350. brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
  351. brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
  352. brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
  353. brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
  354. brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
  355. brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
  356. brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
  357. brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
  358. brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
  359. brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
  360. brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
  361. brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
  362. brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
  363. brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
  364. brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
  365. brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
  366. brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
  367. brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
  368. brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
  369. brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
  370. brainscore_vision/models/nasnet_large/__init__.py +7 -0
  371. brainscore_vision/models/nasnet_large/model.py +60 -0
  372. brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
  373. brainscore_vision/models/nasnet_large/test.py +8 -0
  374. brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
  375. brainscore_vision/models/nasnet_mobile/model.py +685 -0
  376. brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
  377. brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
  378. brainscore_vision/models/nasnet_mobile/test.py +8 -0
  379. brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
  380. brainscore_vision/models/omnivore_swinB/model.py +79 -0
  381. brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
  382. brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
  383. brainscore_vision/models/omnivore_swinB/test.py +9 -0
  384. brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
  385. brainscore_vision/models/omnivore_swinS/model.py +79 -0
  386. brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
  387. brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
  388. brainscore_vision/models/omnivore_swinS/test.py +9 -0
  389. brainscore_vision/models/pnasnet_large/__init__.py +7 -0
  390. brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
  391. brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
  392. brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
  393. brainscore_vision/models/pnasnet_large/test.py +8 -0
  394. brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
  395. brainscore_vision/models/resnet50_SIN/model.py +63 -0
  396. brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
  397. brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
  398. brainscore_vision/models/resnet50_SIN/test.py +9 -0
  399. brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
  400. brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
  401. brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
  402. brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
  403. brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
  404. brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
  405. brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
  406. brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
  407. brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
  408. brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
  409. brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
  410. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
  411. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
  412. brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
  413. brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
  414. brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
  415. brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
  416. brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
  417. brainscore_vision/models/resnet50_barlow/model.py +53 -0
  418. brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
  419. brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
  420. brainscore_vision/models/resnet50_barlow/test.py +9 -0
  421. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
  422. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
  423. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
  424. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
  425. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
  426. brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
  427. brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
  428. brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
  429. brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
  430. brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
  431. brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
  432. brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
  433. brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
  434. brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
  435. brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
  436. brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
  437. brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
  438. brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
  439. brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
  440. brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
  441. brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
  442. brainscore_vision/models/resnet50_sup/__init__.py +5 -0
  443. brainscore_vision/models/resnet50_sup/model.py +55 -0
  444. brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
  445. brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
  446. brainscore_vision/models/resnet50_sup/test.py +8 -0
  447. brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
  448. brainscore_vision/models/resnet50_vicreg/model.py +62 -0
  449. brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
  450. brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
  451. brainscore_vision/models/resnet50_vicreg/test.py +9 -0
  452. brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
  453. brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
  454. brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
  455. brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
  456. brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
  457. brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
  458. brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
  459. brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
  460. brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
  461. brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
  462. brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
  463. brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
  464. brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
  465. brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
  466. brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
  467. brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
  468. brainscore_vision/models/resnet_101_v1/model.py +42 -0
  469. brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
  470. brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
  471. brainscore_vision/models/resnet_101_v1/test.py +8 -0
  472. brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
  473. brainscore_vision/models/resnet_101_v2/model.py +33 -0
  474. brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
  475. brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
  476. brainscore_vision/models/resnet_101_v2/test.py +8 -0
  477. brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
  478. brainscore_vision/models/resnet_152_v1/model.py +42 -0
  479. brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
  480. brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
  481. brainscore_vision/models/resnet_152_v1/test.py +8 -0
  482. brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
  483. brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
  484. brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
  485. brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
  486. brainscore_vision/models/resnet_152_v2/test.py +8 -0
  487. brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
  488. brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
  489. brainscore_vision/models/resnet_18_test_m/model.py +80 -0
  490. brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
  491. brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
  492. brainscore_vision/models/resnet_18_test_m/test.py +8 -0
  493. brainscore_vision/models/resnet_50_2/__init__.py +9 -0
  494. brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
  495. brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
  496. brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
  497. brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
  498. brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
  499. brainscore_vision/models/resnet_50_2/model.py +46 -0
  500. brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
  501. brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
  502. brainscore_vision/models/resnet_50_2/test.py +8 -0
  503. brainscore_vision/models/resnet_50_robust/model.py +2 -2
  504. brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
  505. brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
  506. brainscore_vision/models/resnet_50_v1/model.py +42 -0
  507. brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
  508. brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
  509. brainscore_vision/models/resnet_50_v1/test.py +8 -0
  510. brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
  511. brainscore_vision/models/resnet_50_v2/model.py +33 -0
  512. brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
  513. brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
  514. brainscore_vision/models/resnet_50_v2/test.py +8 -0
  515. brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
  516. brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
  517. brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
  518. brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
  519. brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
  520. brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
  521. brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
  522. brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
  523. brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
  524. brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
  525. brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
  526. brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
  527. brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
  528. brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
  529. brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
  530. brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
  531. brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
  532. brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
  533. brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
  534. brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
  535. brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
  536. brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
  537. brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
  538. brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
  539. brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
  540. brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
  541. brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
  542. brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
  543. brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
  544. brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
  545. brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
  546. brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
  547. brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
  548. brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
  549. brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
  550. brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
  551. brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
  552. brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
  553. brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
  554. brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
  555. brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
  556. brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
  557. brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
  558. brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
  559. brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
  560. brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
  561. brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
  562. brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
  563. brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
  564. brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
  565. brainscore_vision/models/timm_models/__init__.py +193 -0
  566. brainscore_vision/models/timm_models/model.py +90 -0
  567. brainscore_vision/models/timm_models/model_configs.json +464 -0
  568. brainscore_vision/models/timm_models/requirements.txt +3 -0
  569. brainscore_vision/models/timm_models/test.py +0 -0
  570. brainscore_vision/models/vgg_16/__init__.py +7 -0
  571. brainscore_vision/models/vgg_16/model.py +52 -0
  572. brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
  573. brainscore_vision/models/vgg_16/requirements.txt +1 -0
  574. brainscore_vision/models/vgg_16/test.py +8 -0
  575. brainscore_vision/models/vgg_19/__init__.py +7 -0
  576. brainscore_vision/models/vgg_19/model.py +52 -0
  577. brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
  578. brainscore_vision/models/vgg_19/requirements.txt +1 -0
  579. brainscore_vision/models/vgg_19/test.py +8 -0
  580. brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
  581. brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
  582. brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
  583. brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
  584. brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
  585. brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
  586. brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
  587. brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
  588. brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
  589. brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
  590. brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
  591. brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
  592. brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
  593. brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
  594. brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
  595. brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
  596. brainscore_vision/models/voneresnet_50/__init__.py +7 -0
  597. brainscore_vision/models/voneresnet_50/model.py +37 -0
  598. brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
  599. brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
  600. brainscore_vision/models/voneresnet_50/test.py +8 -0
  601. brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
  602. brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
  603. brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
  604. brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
  605. brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
  606. brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
  607. brainscore_vision/models/voneresnet_50_1/model.py +68 -0
  608. brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
  609. brainscore_vision/models/voneresnet_50_1/test.py +7 -0
  610. brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
  611. brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
  612. brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
  613. brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
  614. brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
  615. brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
  616. brainscore_vision/models/voneresnet_50_3/model.py +66 -0
  617. brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
  618. brainscore_vision/models/voneresnet_50_3/test.py +7 -0
  619. brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
  620. brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
  621. brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
  622. brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
  623. brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
  624. brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
  625. brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
  626. brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
  627. brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
  628. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
  629. brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
  630. brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
  631. brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
  632. brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
  633. brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
  634. brainscore_vision/models/xception/__init__.py +7 -0
  635. brainscore_vision/models/xception/model.py +64 -0
  636. brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
  637. brainscore_vision/models/xception/requirements.txt +2 -0
  638. brainscore_vision/models/xception/test.py +8 -0
  639. brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
  640. brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
  641. brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
  642. brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
  643. brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
  644. brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
  645. brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
  646. brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
  647. brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
  648. brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
  649. brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
  650. brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
  651. brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
  652. brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
  653. brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
  654. brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
  655. brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
  656. brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
  657. brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
  658. brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
  659. brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
  660. brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
  661. brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
  662. brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
  663. brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
  664. brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
  665. brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
  666. brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
  667. brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
  668. brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
  669. brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
  670. brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
  671. brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
  672. brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
  673. brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
  674. brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
  675. brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
  676. brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
  677. brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
  678. brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
  679. brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
  680. brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
  681. brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
  682. brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
  683. brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
  684. brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
  685. brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
  686. brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
  687. brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
  688. brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
  689. brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
  690. brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
  691. brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
  692. brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
  693. brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
  694. brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
  695. brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
  696. brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
  697. brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
  698. brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
  699. brainscore_vision/submission/actions_helpers.py +2 -3
  700. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
  701. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
  702. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
  703. docs/source/index.rst +1 -0
  704. docs/source/modules/submission.rst +1 -1
  705. docs/source/modules/version_bumping.rst +43 -0
  706. tests/test_submission/test_actions_helpers.py +2 -6
  707. brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
  708. brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
  709. brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
  710. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
  711. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
  712. brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
  713. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
  714. brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
  715. /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
  716. /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
  717. /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
  718. /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
  719. /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
  720. /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
  721. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
  722. {brainscore_vision-2.2.4.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
brainscore_vision/models/ReAlnet/model.py
@@ -0,0 +1,237 @@
+ import math
+ import functools
+ import json
+ import os
+ from collections import OrderedDict
+
+ import torch
+ from torch import nn
+ from torchvision import transforms
+
+ from brainscore_vision.model_helpers.s3 import load_weight_file
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+
+ LAYERS = ['V1', 'V2', 'V4', 'IT', 'decoder.avgpool']
+
+
+ class Flatten(nn.Module):
+     """
+     Helper module that flattens the input tensor to 1-D, for use in front of Linear modules
+     """
+     def forward(self, x):
+         return x.view(x.size(0), -1)
+
+
+ class Identity(nn.Module):
+     """
+     Helper module that passes the input through unchanged. Useful for accessing intermediate tensors by name
+     """
+     def forward(self, x):
+         return x
+
+
+ class CORblock_S(nn.Module):
+     scale = 4  # scale of the bottleneck convolution channels
+
+     def __init__(self, in_channels, out_channels, times=1):
+         super().__init__()
+         self.times = times
+
+         self.conv_input = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
+         self.skip = nn.Conv2d(out_channels, out_channels,
+                               kernel_size=1, stride=2, bias=False)
+         self.norm_skip = nn.BatchNorm2d(out_channels)
+
+         self.conv1 = nn.Conv2d(out_channels, out_channels * self.scale,
+                                kernel_size=1, bias=False)
+         self.nonlin1 = nn.ReLU(inplace=True)
+
+         self.conv2 = nn.Conv2d(out_channels * self.scale, out_channels * self.scale,
+                                kernel_size=3, stride=2, padding=1, bias=False)
+         self.nonlin2 = nn.ReLU(inplace=True)
+
+         self.conv3 = nn.Conv2d(out_channels * self.scale, out_channels,
+                                kernel_size=1, bias=False)
+         self.nonlin3 = nn.ReLU(inplace=True)
+
+         self.output = Identity()  # for easy access to this block's output
+
+         # need BatchNorm for each time step for training to work well
+         for t in range(self.times):
+             setattr(self, f'norm1_{t}', nn.BatchNorm2d(out_channels * self.scale))
+             setattr(self, f'norm2_{t}', nn.BatchNorm2d(out_channels * self.scale))
+             setattr(self, f'norm3_{t}', nn.BatchNorm2d(out_channels))
+
+     def forward(self, inp):
+         x = self.conv_input(inp)
+         for t in range(self.times):
+             if t == 0:
+                 skip = self.norm_skip(self.skip(x))
+                 self.conv2.stride = (2, 2)
+             else:
+                 skip = x
+                 self.conv2.stride = (1, 1)
+
+             x = self.conv1(x)
+             x = getattr(self, f'norm1_{t}')(x)
+             x = self.nonlin1(x)
+
+             x = self.conv2(x)
+             x = getattr(self, f'norm2_{t}')(x)
+             x = self.nonlin2(x)
+
+             x = self.conv3(x)
+             x = getattr(self, f'norm3_{t}')(x)
+
+             x += skip
+             x = self.nonlin3(x)
+             output = self.output(x)
+
+         return output
+
+
+ def CORnet_S():
+     model = nn.Sequential(OrderedDict([
+         ('V1', nn.Sequential(OrderedDict([
+             ('conv1', nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)),
+             ('norm1', nn.BatchNorm2d(64)),
+             ('nonlin1', nn.ReLU(inplace=True)),
+             ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
+             ('conv2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)),
+             ('norm2', nn.BatchNorm2d(64)),
+             ('nonlin2', nn.ReLU(inplace=True)),
+             ('output', Identity())
+         ]))),
+         ('V2', CORblock_S(64, 128, times=2)),
+         ('V4', CORblock_S(128, 256, times=4)),
+         ('IT', CORblock_S(256, 512, times=2)),
+         ('decoder', nn.Sequential(OrderedDict([
+             ('avgpool', nn.AdaptiveAvgPool2d(1)),
+             ('flatten', Flatten()),
+             ('linear', nn.Linear(512, 1000)),
+             ('output', Identity())
+         ])))
+     ]))
+
+     # weight initialization
+     for m in model.modules():
+         if isinstance(m, nn.Conv2d):
+             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+             m.weight.data.normal_(0, math.sqrt(2. / n))
+         elif isinstance(m, nn.BatchNorm2d):
+             m.weight.data.fill_(1)
+             m.bias.data.zero_()
+
+     return model
+
+
+ class Encoder(nn.Module):
+     def __init__(self, realnet, n_output):
+         super(Encoder, self).__init__()
+
+         # CORnet backbone
+         self.realnet = realnet
+
+         # fully connected readout layers, one per CORnet area
+         self.fc_v1 = nn.Linear(200704, 128)
+         self.fc_v2 = nn.Linear(100352, 128)
+         self.fc_v4 = nn.Linear(50176, 128)
+         self.fc_it = nn.Linear(25088, 128)
+         self.fc = nn.Linear(512, n_output)
+         self.activation = nn.ReLU()
+
+     def forward(self, imgs):
+         # forward pass through CORnet_S
+         outputs = self.realnet(imgs)
+
+         N = len(imgs)
+         v1_outputs = self.realnet.V1(imgs)        # N * 64 * 56 * 56
+         v2_outputs = self.realnet.V2(v1_outputs)  # N * 128 * 28 * 28
+         v4_outputs = self.realnet.V4(v2_outputs)  # N * 256 * 14 * 14
+         it_outputs = self.realnet.IT(v4_outputs)  # N * 512 * 7 * 7
+
+         # flatten and pass through fully connected layers
+         v1_features = self.fc_v1(v1_outputs.view(N, -1))
+         v1_features = self.activation(v1_features)
+
+         v2_features = self.fc_v2(v2_outputs.view(N, -1))
+         v2_features = self.activation(v2_features)
+
+         v4_features = self.fc_v4(v4_outputs.view(N, -1))
+         v4_features = self.activation(v4_features)
+
+         it_features = self.fc_it(it_outputs.view(N, -1))
+         it_features = self.activation(it_features)
+
+         features = torch.cat((v1_features, v2_features, v4_features, it_features), dim=1)
+         features = self.fc(features)
+
+         return outputs, features
+
+
+ # run everything on CPU; DataParallel is not needed here
+ device = 'cpu'
+
+ transform = transforms.Compose([
+     transforms.Resize((224, 224)),
+     transforms.ToTensor(),
+     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+ ])
+
+ # construct CORnet_S
+ realnet = CORnet_S()
+
+
+ def load_config(json_file):
+     # get the directory containing this script (model.py)
+     base_dir = os.path.dirname(__file__)
+     # construct the path to the JSON file and read it
+     json_path = os.path.join(base_dir, json_file)
+     with open(json_path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+     return data
+
+
+ # build encoder model
+ encoder = Encoder(realnet, 340)
+
+
+ def model_load_weights(identifier: str):
+     # download weights (Brain-Score team modification):
+     # read the version id and sha1 from the packaged "weights.json" file
+     weights_info = load_config("weights.json")
+     version_id = weights_info['version_ids'][identifier]
+     sha1 = weights_info['sha1s'][identifier]
+
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
+                                     relative_path=f"ReAlnet/{identifier}_best_model_params.pt",
+                                     version_id=version_id,
+                                     sha1=sha1)
+
+     # load weights onto CPU and remove the "module." prefix (if present) from the keys
+     weights = torch.load(weights_path, map_location='cpu')
+     new_state_dict = {}
+     for key, val in weights.items():
+         new_key = key.replace("module.", "")
+         new_state_dict[new_key] = val
+
+     encoder.load_state_dict(new_state_dict)
+
+     # retrieve the realnet portion from the encoder
+     realnet = encoder.realnet
+     realnet.eval()
+     return realnet
+
+
+ def get_model(identifier: str):
+     model = model_load_weights(identifier)
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = PytorchWrapper(identifier=identifier, model=model, preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
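For context, a minimal usage sketch of the module above. This is hedged: `ReAlnet01` is one of the identifiers enumerated in the weights.json file further below, and the wrapper's callable interface (a list of image paths plus a `layers` argument) is assumed from the Brain-Score model helpers rather than shown in this file:

    # hedged sketch: downloads ReAlnet01_best_model_params.pt on first call
    wrapper = get_model('ReAlnet01')
    activations = wrapper(['stimulus.png'], layers=['IT'])  # extract IT activations for a stimulus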
@@ -0,0 +1,7 @@
+
+ torch
+ torchvision
+ pandas
+ numpy
+ h5py
+ gdown
File without changes
@@ -0,0 +1,26 @@
+ {
+   "version_ids": {
+     "ReAlnet01": "75oY3CnI17U5S1f_yrZxl1XGhRfJEG9N",
+     "ReAlnet02": "TfGdm1CphJJ1vvkJGcm3n266PHvTuOaV",
+     "ReAlnet03": "dmohrH_AHZzgL_o8Xd2SDp6XCnjPOdAu",
+     "ReAlnet04": "45qJFXHihmIHdpHbjKWZco6STH1eh49p",
+     "ReAlnet05": "nqvoYgiBTyWSskjnpF9YOK4yYQfOnc_H",
+     "ReAlnet06": "6.cloFvnMihiicwQ0jkag8reEe4bVlxZ",
+     "ReAlnet07": "WKJaiN4b1ttpbGYNn8yVjng4LjCqWdk.",
+     "ReAlnet08": "vmouew6ePkPnKP.We8VnVxU7TifuhL.x",
+     "ReAlnet09": "53gqQ2tgS.5MEoncipy9mrBEqCc5izw5",
+     "ReAlnet10": "ZZFMhTm9KQYEXl8OGwKmnTr0S.pxkU0J"
+   },
+   "sha1s": {
+     "ReAlnet01": "05e4e401e8734b97e561aad306fc584b7e027225",
+     "ReAlnet02": "e85769fadb3c09ff88a7d73b01451b6bcccefd77",
+     "ReAlnet03": "f32d01d73380374ae501a1504e9c8cd219e9f0bf",
+     "ReAlnet04": "8062373fd6a74c52360420619235590d3688b4df",
+     "ReAlnet05": "88ca110f6b6d225b7b4e7dca02d2e7a906f5a8ed",
+     "ReAlnet06": "a1658c15a3c9d61262f87349c9fb7aa63854ac5b",
+     "ReAlnet07": "6a1c260839c75f6e6c018e06830562cdcda877e5",
+     "ReAlnet08": "1772211b27dd3a7d9255ac59d5f9b7e7cb6c3314",
+     "ReAlnet09": "159d96f0433a87c7063259dac4527325a3c7b79a",
+     "ReAlnet10": "dbdeaee9280267613ebce92dd5d515d89b544352"
+   }
+ }
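The sha1s above are the packaged source of truth for weight integrity (load_weight_file is given each one at download time). For independent verification, a standard-library sketch; the helper name `file_sha1` is ours, not part of the package:

    import hashlib

    def file_sha1(path):
        # stream the file in 1 MiB chunks to avoid loading large checkpoints into memory
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest()

    # e.g. file_sha1(weights_path) == weights_info['sha1s']['ReAlnet01']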
@@ -0,0 +1,46 @@
+ from brainscore_vision import model_registry
+ from .helpers.helpers import CORnetCommitment, _build_time_mappings
+ from .model import get_model, TIME_MAPPINGS, get_layers
+
+
+ model_registry['ReAlnet01_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet01_cornet',
+                                                               activations_model=get_model('ReAlnet01_cornet'),
+                                                               layers=get_layers('ReAlnet01_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet02_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet02_cornet',
+                                                               activations_model=get_model('ReAlnet02_cornet'),
+                                                               layers=get_layers('ReAlnet02_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet03_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet03_cornet',
+                                                               activations_model=get_model('ReAlnet03_cornet'),
+                                                               layers=get_layers('ReAlnet03_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet04_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet04_cornet',
+                                                               activations_model=get_model('ReAlnet04_cornet'),
+                                                               layers=get_layers('ReAlnet04_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet05_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet05_cornet',
+                                                               activations_model=get_model('ReAlnet05_cornet'),
+                                                               layers=get_layers('ReAlnet05_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet06_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet06_cornet',
+                                                               activations_model=get_model('ReAlnet06_cornet'),
+                                                               layers=get_layers('ReAlnet06_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet07_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet07_cornet',
+                                                               activations_model=get_model('ReAlnet07_cornet'),
+                                                               layers=get_layers('ReAlnet07_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet08_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet08_cornet',
+                                                               activations_model=get_model('ReAlnet08_cornet'),
+                                                               layers=get_layers('ReAlnet08_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet09_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet09_cornet',
+                                                               activations_model=get_model('ReAlnet09_cornet'),
+                                                               layers=get_layers('ReAlnet09_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
+
+ model_registry['ReAlnet10_cornet'] = lambda: CORnetCommitment(identifier='ReAlnet10_cornet',
+                                                               activations_model=get_model('ReAlnet10_cornet'),
+                                                               layers=get_layers('ReAlnet10_cornet'),
+                                                               time_mapping=_build_time_mappings(TIME_MAPPINGS))
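Note that each registry entry is a zero-argument lambda, so the CORnetCommitment (including the weight download) is only constructed when the model is actually requested; a sketch, assuming the registry is resolved through the usual brainscore_vision.load_model entry point:

    from brainscore_vision import load_model

    model = load_model('ReAlnet01_cornet')  # triggers the lambda above, builds the commitment lazily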
@@ -0,0 +1,215 @@
+ import re
+ from collections import defaultdict
+ from typing import Dict, Tuple
+
+ import numpy as np
+ from tqdm import tqdm
+
+ from brainio.assemblies import merge_data_arrays, NeuroidAssembly, walk_coords
+ from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.brain_transformation.behavior import BehaviorArbiter, LogitsBehavior, \
+     ProbabilitiesMapping, OddOneOut
+ from brainscore_vision.model_interface import BrainModel
+ from result_caching import store
+
+
+ class TemporalPytorchWrapper(PytorchWrapper):
+     def __init__(self, *args, separate_time=True, **kwargs):
+         self._separate_time = separate_time
+         super(TemporalPytorchWrapper, self).__init__(*args, **kwargs)
+
+     def _build_extractor(self, *args, **kwargs):
+         if self._separate_time:
+             return TemporalExtractor(*args, **kwargs)
+         else:
+             return super(TemporalPytorchWrapper, self)._build_extractor(*args, **kwargs)
+
+     def get_activations(self, images, layer_names):
+         # reset per-layer timestep counters and hooks
+         self._layer_counter = defaultdict(lambda: 0)
+         self._layer_hooks = {}
+         return super(TemporalPytorchWrapper, self).get_activations(images=images, layer_names=layer_names)
+
+     def register_hook(self, layer, layer_name, target_dict):
+         layer_name = self._strip_layer_timestep(layer_name)
+         if layer_name in self._layer_hooks:  # add hook only once for multiple timesteps
+             return self._layer_hooks[layer_name]
+
+         def hook_function(_layer, _input, output):
+             target_dict[f"{layer_name}-t{self._layer_counter[layer_name]}"] = PytorchWrapper._tensor_to_numpy(output)
+             self._layer_counter[layer_name] += 1
+
+         hook = layer.register_forward_hook(hook_function)
+         self._layer_hooks[layer_name] = hook
+         return hook
+
+     def get_layer(self, layer_name):
+         layer_name = self._strip_layer_timestep(layer_name)
+         return super(TemporalPytorchWrapper, self).get_layer(layer_name)
+
+     def _strip_layer_timestep(self, layer_name):
+         match = re.search('-t[0-9]+$', layer_name)
+         if match:
+             layer_name = layer_name[:match.start()]
+         return layer_name
+
+
+ class CORnetCommitment(BrainModel):
+     """
+     CORnet commitment where only the model interface is implemented and behavioral readouts are attached.
+     Importantly, layer-region commitment does not occur here because regions are anatomically pre-mapped.
+     Further, due to the temporal component of the model, requested time-bins are matched to the nearest
+     committed time-bin of the model.
+     """
+
+     def __init__(self, identifier, activations_model, layers,
+                  time_mapping: Dict[str, Dict[int, Tuple[int, int]]], behavioral_readout_layer=None,
+                  visual_degrees=8):
+         """
+         :param time_mapping: mapping from region -> {model_timestep -> (time_bin_start, time_bin_end)}
+         """
+         self.layers = layers
+         self.activations_model = activations_model
+         self.time_mapping = time_mapping
+         self.recording_layers = None
+         self.recording_time_bins = None
+         self._identifier = identifier
+
+         logits_behavior = LogitsBehavior(
+             identifier=identifier, activations_model=TemporalIgnore(activations_model))
+         behavioral_readout_layer = behavioral_readout_layer or layers[-1]
+         probabilities_behavior = ProbabilitiesMapping(
+             identifier=identifier, activations_model=TemporalIgnore(activations_model), layer=behavioral_readout_layer)
+         odd_one_out = OddOneOut(identifier=identifier, activations_model=TemporalIgnore(activations_model),
+                                 layer=behavioral_readout_layer)
+         self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior,
+                                                BrainModel.Task.probabilities: probabilities_behavior,
+                                                BrainModel.Task.odd_one_out: odd_one_out,
+                                                })
+         self.do_behavior = False
+
+         self._visual_degrees = visual_degrees
+
+     @property
+     def identifier(self):
+         return self._identifier
+
+     def visual_degrees(self) -> int:
+         return self._visual_degrees
+
+     def start_recording(self, recording_target, time_bins):
+         self.recording_layers = [layer for layer in self.layers if layer.startswith(recording_target)]
+         self.recording_time_bins = time_bins
+
+     def start_task(self, task: BrainModel.Task, *args, **kwargs):
+         if task != BrainModel.Task.passive:
+             self.behavior_model.start_task(task, *args, **kwargs)
+             self.do_behavior = True
+
+     def look_at(self, stimuli, number_of_trials: int = 1, require_variance: bool = False):
+         if self.do_behavior:
+             return self.behavior_model.look_at(stimuli,
+                                                number_of_trials=number_of_trials, require_variance=require_variance)
+         else:
+             # cache, since piecing times together is unfortunately not very fast
+             return self.look_at_cached(self.identifier, stimuli.identifier, stimuli,
+                                        number_of_trials=number_of_trials, require_variance=require_variance)
+
+     @store(identifier_ignore=['stimuli', 'number_of_trials', 'require_variance'])
+     def look_at_cached(self, model_identifier, stimuli_identifier, stimuli,
+                        number_of_trials, require_variance):
+         responses = self.activations_model(stimuli, layers=self.recording_layers,
+                                            number_of_trials=number_of_trials, require_variance=require_variance)
+         # map time
+         regions = set(responses['region'].values)
+         if len(regions) > 1:
+             raise NotImplementedError("cannot handle more than one simultaneous region")
+         region = list(regions)[0]
+         time_bins = [self.time_mapping[region][timestep] if timestep in self.time_mapping[region] else (None, None)
+                      for timestep in responses['time_step'].values]
+         responses['time_bin_start'] = 'time_step', [time_bin[0] for time_bin in time_bins]
+         responses['time_bin_end'] = 'time_step', [time_bin[1] for time_bin in time_bins]
+         responses = NeuroidAssembly(responses.rename({'time_step': 'time_bin'}))
+         responses = responses[{'time_bin': [not np.isnan(time_start) for time_start in responses['time_bin_start']]}]
+         # select time
+         time_responses = []
+         for time_bin in tqdm(self.recording_time_bins, desc='CORnet-time to recording time'):
+             time_bin = time_bin if not isinstance(time_bin, np.ndarray) else time_bin.tolist()
+             time_bin_start, time_bin_end = time_bin
+             nearest_start = find_nearest(responses['time_bin_start'].values, time_bin_start)
+             bin_responses = responses.sel(time_bin_start=nearest_start)
+             bin_responses = NeuroidAssembly(bin_responses.values, coords={
+                 **{coord: (dims, values) for coord, dims, values in walk_coords(bin_responses)
+                    if coord not in ['time_bin_level_0', 'time_bin_end']},
+                 **{'time_bin_start': ('time_bin', [time_bin_start]),
+                    'time_bin_end': ('time_bin', [time_bin_end])}
+             }, dims=bin_responses.dims)
+             time_responses.append(bin_responses)
+         responses = merge_data_arrays(time_responses)
+         return responses
+
+
+ def find_nearest(array, value):
+     array = np.asarray(array)
+     idx = (np.abs(array - value)).argmin()
+     return array[idx]
+
+
+ class TemporalIgnore:
+     """
+     Wrapper around an activations model that squeezes out the temporal axis.
+     Useful when there is only one time step and the behavioral readout does not know what to do with time.
+     """
+
+     def __init__(self, temporal_activations_model):
+         self._activations_model = temporal_activations_model
+
+     def __call__(self, *args, **kwargs):
+         activations = self._activations_model(*args, **kwargs)
+         activations = activations.squeeze('time_step')
+         return activations
+
+
+ class TemporalExtractor(ActivationsExtractorHelper):
+     # `from_paths` is the earliest method at which we can interject, because the calls below it are stored and
+     # checked for the presence of all layers which, for CORnet, are passed as e.g. `IT.output-t0`.
+     # This code re-arranges the time component.
+     def from_paths(self, *args, **kwargs):
+         raw_activations = super(TemporalExtractor, self).from_paths(*args, **kwargs)
+         # introduce time dimension
+         regions = defaultdict(list)
+         for layer in set(raw_activations['layer'].values):
+             match = re.match(r'(([^-]*)\..*|logits|avgpool)-t([0-9]+)', layer)
+             region, timestep = match.group(2) if match.group(2) else match.group(1), match.group(3)
+             stripped_layer = match.group(1)
+             regions[region].append((layer, stripped_layer, timestep))
+         activations = {}
+         for region, time_layers in regions.items():
+             for (full_layer, stripped_layer, timestep) in time_layers:
+                 region_time_activations = raw_activations.sel(layer=full_layer)
+                 region_time_activations['layer'] = 'neuroid', [stripped_layer] * len(region_time_activations['neuroid'])
+                 activations[(region, timestep)] = region_time_activations
+         for key, key_activations in activations.items():
+             region, timestep = key
+             key_activations['region'] = 'neuroid', [region] * len(key_activations['neuroid'])
+             activations[key] = NeuroidAssembly([key_activations.values], coords={
+                 **{coord: (dims, values) for coord, dims, values in walk_coords(activations[key])
+                    if coord != 'neuroid_id'},  # otherwise, the neuroid dim would stay as large as before, with nans
+                 **{'time_step': [int(timestep)]}
+             }, dims=['time_step'] + list(key_activations.dims))
+         activations = list(activations.values())
+         activations = merge_data_arrays(activations)
+         # rebuild neuroid_id without the timestep
+         neuroid_id = [".".join([f"{value}" for value in values]) for values in zip(*[
+             activations[coord].values for coord in ['model', 'region', 'neuroid_num']])]
+         activations['neuroid_id'] = 'neuroid', neuroid_id
+         return activations
+
+
+ def _build_time_mappings(time_mappings):
+     return {region: {
+         timestep: (time_start + timestep * time_step_size,
+                    time_start + (timestep + 1) * time_step_size)
+         for timestep in range(0, timesteps)}
+         for region, (time_start, time_step_size, timesteps) in time_mappings.items()}
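As a worked example of the mapping this builds, using the V4 entry of TIME_MAPPINGS from model.py below (start 90 ms, 50 ms steps, 4 timesteps):

    _build_time_mappings({'V4': (90, 50, 4)})
    # -> {'V4': {0: (90, 140), 1: (140, 190), 2: (190, 240), 3: (240, 290)}}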
@@ -0,0 +1,69 @@
+ import functools
+ import importlib
+ import ssl
+ from urllib.request import urlretrieve
+
+ import torch
+ from torch.nn import Module
+
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from .helpers.helpers import TemporalPytorchWrapper
+
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+
+ TIME_MAPPINGS = {
+     'V1': (50, 100, 1),
+     'V2': (70, 100, 2),
+     # 'V2': (20, 50, 2),  # MS: This follows from the Movshon anesthetized-monkey recordings, so might not hold up
+     'V4': (90, 50, 4),
+     'IT': (100, 100, 2),
+ }
+
+
+ def get_model(identifier: str):
+
+     class Wrapper(Module):
+         def __init__(self, model):
+             super(Wrapper, self).__init__()
+             self.module = model
+
+     mod = importlib.import_module('cornet.cornet_s')
+     model_ctr = getattr(mod, 'CORnet_S')
+     model = model_ctr()
+     model = Wrapper(model)  # model was wrapped with DataParallel, so weights require the `module.` prefix
+
+     # cornet version: shorten identifier (e.g. 'ReAlnet01_cornet' -> 'ReAlnet01')
+     identifier_short = identifier[:9]
+     url = f'https://brainscore-storage.s3.us-east-2.amazonaws.com/brainscore-vision/models/ReAlnet/{identifier_short}_best_model_params.pt'
+     fh = urlretrieve(url, f'{identifier_short}_best_model_params.pth')
+     load_path = fh[0]
+     checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)  # map onto cpu
+     new_state_dict = {}
+     for key, val in checkpoint.items():
+         # remove the "realnet." prefix (if it exists) from the key
+         new_key = key.replace("realnet.", "")
+         # discard the keys starting with "fc"
+         if not new_key.startswith('fc'):
+             new_state_dict[new_key] = val
+
+     model.load_state_dict(new_state_dict)
+     model = model.module  # unwrap
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = TemporalPytorchWrapper(identifier=identifier, model=model, preprocessing=preprocessing,
+                                      separate_time=True)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(identifier: str):
+     return (['V1.output-t0'] +
+             [f'{area}.output-t{timestep}'
+              for area, timesteps in [('V2', range(2)), ('V4', range(4)), ('IT', range(2))]
+              for timestep in timesteps] +
+             ['decoder.avgpool-t0'])
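For reference, get_layers expands to ten temporal layer names, one per committed area and timestep:

    ['V1.output-t0',
     'V2.output-t0', 'V2.output-t1',
     'V4.output-t0', 'V4.output-t1', 'V4.output-t2', 'V4.output-t3',
     'IT.output-t0', 'IT.output-t1',
     'decoder.avgpool-t0']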
@@ -0,0 +1,8 @@
+
+ CORnet @ git+https://github.com/dicarlolab/CORnet.git
+ torch
+ torchvision
+ pandas
+ numpy
+ h5py
+ gdown
File without changes
@@ -0,0 +1,5 @@
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['Res2Net50_26w_4s'] = lambda: ModelCommitment(identifier='Res2Net50_26w_4s',
+                                                              activations_model=get_model('Res2Net50_26w_4s'),
+                                                              layers=get_layers('Res2Net50_26w_4s'))