brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl

Files changed (722)
  1. brainscore_vision/data/baker2022/__init__.py +10 -10
  2. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
  3. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
  4. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
  5. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
  6. brainscore_vision/data/barbumayo2019/__init__.py +3 -3
  7. brainscore_vision/data/bashivankar2019/__init__.py +10 -10
  8. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
  9. brainscore_vision/data/bmd2024/__init__.py +20 -20
  10. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
  11. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
  12. brainscore_vision/data/bracci2019/__init__.py +5 -5
  13. brainscore_vision/data/bracci2019/data_packaging.py +1 -1
  14. brainscore_vision/data/cadena2017/__init__.py +5 -5
  15. brainscore_vision/data/cichy2019/__init__.py +5 -5
  16. brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
  17. brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
  18. brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
  19. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
  20. brainscore_vision/data/david2004/__init__.py +5 -5
  21. brainscore_vision/data/deng2009/__init__.py +3 -3
  22. brainscore_vision/data/ferguson2024/__init__.py +112 -112
  23. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
  24. brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
  25. brainscore_vision/data/geirhos2021/__init__.py +85 -85
  26. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
  27. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
  28. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
  29. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
  30. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
  31. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
  32. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
  33. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
  34. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
  35. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
  36. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
  37. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
  38. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
  39. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
  40. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
  41. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
  42. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
  43. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
  44. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
  45. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
  46. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
  47. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
  48. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
  49. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
  50. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
  51. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
  52. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
  53. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
  54. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
  55. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
  56. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
  57. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
  58. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
  59. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
  60. brainscore_vision/data/hebart2023/__init__.py +5 -5
  61. brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
  62. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
  63. brainscore_vision/data/hendrycks2019/__init__.py +12 -12
  64. brainscore_vision/data/igustibagus2024/__init__.py +5 -5
  65. brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
  66. brainscore_vision/data/islam2021/__init__.py +3 -3
  67. brainscore_vision/data/kar2018/__init__.py +7 -7
  68. brainscore_vision/data/kar2019/__init__.py +5 -5
  69. brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
  70. brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
  71. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
  72. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
  73. brainscore_vision/data/majajhong2015/__init__.py +23 -23
  74. brainscore_vision/data/malania2007/__init__.py +77 -77
  75. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
  76. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
  77. brainscore_vision/data/maniquet2024/__init__.py +11 -11
  78. brainscore_vision/data/marques2020/__init__.py +30 -30
  79. brainscore_vision/data/rajalingham2018/__init__.py +10 -10
  80. brainscore_vision/data/rajalingham2020/__init__.py +5 -5
  81. brainscore_vision/data/rust2012/__init__.py +7 -7
  82. brainscore_vision/data/sanghavi2020/__init__.py +19 -19
  83. brainscore_vision/data/scialom2024/__init__.py +110 -110
  84. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
  85. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
  86. brainscore_vision/data/seibert2019/__init__.py +2 -2
  87. brainscore_vision/data/zhang2018/__init__.py +5 -5
  88. brainscore_vision/data_helpers/s3.py +25 -6
  89. brainscore_vision/model_helpers/activations/pytorch.py +34 -12
  90. brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
  91. brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
  92. brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
  93. brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
  94. brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
  95. brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
  96. brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
  97. brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
  98. brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
  99. brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
  100. brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
  101. brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
  102. brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
  103. brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
  104. brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
  105. brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
  106. brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
  107. brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
  108. brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
  109. brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
  110. brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
  111. brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
  112. brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
  113. brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
  114. brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
  115. brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
  116. brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
  117. brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
  118. brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
  119. brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
  120. brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
  121. brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
  122. brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
  123. brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
  124. brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
  125. brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
  126. brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
  127. brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
  128. brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
  129. brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
  130. brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
  131. brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
  132. brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
  133. brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
  134. brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
  135. brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
  136. brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
  137. brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
  138. brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
  139. brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
  140. brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
  141. brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
  142. brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
  143. brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
  144. brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
  145. brainscore_vision/models/ReAlnet/__init__.py +64 -0
  146. brainscore_vision/models/ReAlnet/model.py +237 -0
  147. brainscore_vision/models/ReAlnet/requirements.txt +7 -0
  148. brainscore_vision/models/ReAlnet/test.py +0 -0
  149. brainscore_vision/models/ReAlnet/weights.json +26 -0
  150. brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
  151. brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
  152. brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
  153. brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
  154. brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
  155. brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
  156. brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
  157. brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
  158. brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
  159. brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
  160. brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
  161. brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
  162. brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
  163. brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
  164. brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
  165. brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
  166. brainscore_vision/models/VOneCORnet_S/model.py +25 -0
  167. brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
  168. brainscore_vision/models/VOneCORnet_S/test.py +8 -0
  169. brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
  170. brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
  171. brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
  172. brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
  173. brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
  174. brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
  175. brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
  176. brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
  177. brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
  178. brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
  179. brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
  180. brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
  181. brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
  182. brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
  183. brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
  184. brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
  185. brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
  186. brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
  187. brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
  188. brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
  189. brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
  190. brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
  191. brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
  192. brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
  193. brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
  194. brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
  195. brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
  196. brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
  197. brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
  198. brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
  199. brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
  200. brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
  201. brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
  202. brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
  203. brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
  204. brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
  205. brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
  206. brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
  207. brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
  208. brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
  209. brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
  210. brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
  211. brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
  212. brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
  213. brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
  214. brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
  215. brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
  216. brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
  217. brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
  218. brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
  219. brainscore_vision/models/antialiased-r50/__init__.py +7 -0
  220. brainscore_vision/models/antialiased-r50/model.py +62 -0
  221. brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
  222. brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
  223. brainscore_vision/models/antialiased-r50/test.py +8 -0
  224. brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
  225. brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
  226. brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
  227. brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
  228. brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
  229. brainscore_vision/models/cornet_s/model.py +2 -2
  230. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
  231. brainscore_vision/models/densenet_121/__init__.py +7 -0
  232. brainscore_vision/models/densenet_121/model.py +63 -0
  233. brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
  234. brainscore_vision/models/densenet_121/requirements.txt +1 -0
  235. brainscore_vision/models/densenet_121/test.py +8 -0
  236. brainscore_vision/models/densenet_169/__init__.py +7 -0
  237. brainscore_vision/models/densenet_169/model.py +63 -0
  238. brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
  239. brainscore_vision/models/densenet_169/requirements.txt +1 -0
  240. brainscore_vision/models/densenet_169/test.py +9 -0
  241. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
  242. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
  243. brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
  244. brainscore_vision/models/densenet_201/test.py +8 -0
  245. brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
  246. brainscore_vision/models/efficientnet_b0/model.py +45 -0
  247. brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
  248. brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
  249. brainscore_vision/models/efficientnet_b0/test.py +8 -0
  250. brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
  251. brainscore_vision/models/efficientnet_b7/model.py +61 -0
  252. brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
  253. brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
  254. brainscore_vision/models/efficientnet_b7/test.py +9 -0
  255. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
  256. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
  257. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
  258. brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
  259. brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
  260. brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
  261. brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
  262. brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
  263. brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
  264. brainscore_vision/models/evresnet_50_1/model.py +62 -0
  265. brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
  266. brainscore_vision/models/evresnet_50_1/test.py +8 -0
  267. brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
  268. brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
  269. brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
  270. brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
  271. brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
  272. brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
  273. brainscore_vision/models/evresnet_50_4/model.py +67 -0
  274. brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
  275. brainscore_vision/models/evresnet_50_4/test.py +8 -0
  276. brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
  277. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
  278. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
  279. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
  280. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
  281. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
  282. brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
  283. brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
  284. brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
  285. brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
  286. brainscore_vision/models/grcnn/__init__.py +7 -0
  287. brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
  288. brainscore_vision/models/grcnn/model.py +54 -0
  289. brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
  290. brainscore_vision/models/grcnn/requirements.txt +2 -0
  291. brainscore_vision/models/grcnn/test.py +9 -0
  292. brainscore_vision/models/grcnn_109/__init__.py +5 -0
  293. brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
  294. brainscore_vision/models/grcnn_109/model.py +53 -0
  295. brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
  296. brainscore_vision/models/grcnn_109/requirements.txt +2 -0
  297. brainscore_vision/models/grcnn_109/test.py +9 -0
  298. brainscore_vision/models/hmax/model.py +2 -2
  299. brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
  300. brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
  301. brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
  302. brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
  303. brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
  304. brainscore_vision/models/inception_v1/__init__.py +7 -0
  305. brainscore_vision/models/inception_v1/model.py +67 -0
  306. brainscore_vision/models/inception_v1/requirements.txt +1 -0
  307. brainscore_vision/models/inception_v1/test.py +8 -0
  308. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
  309. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
  310. brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
  311. brainscore_vision/models/inception_v3/test.py +8 -0
  312. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
  313. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
  314. brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
  315. brainscore_vision/models/inception_v4/test.py +8 -0
  316. brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
  317. brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
  318. brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
  319. brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
  320. brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
  321. brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
  322. brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
  323. brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
  324. brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
  325. brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
  326. brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
  327. brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
  328. brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
  329. brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
  330. brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
  331. brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
  332. brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
  333. brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
  334. brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
  335. brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
  336. brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
  337. brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
  338. brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
  339. brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
  340. brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
  341. brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
  342. brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
  343. brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
  344. brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
  345. brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
  346. brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
  347. brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
  348. brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
  349. brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
  350. brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
  351. brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
  352. brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
  353. brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
  354. brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
  355. brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
  356. brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
  357. brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
  358. brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
  359. brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
  360. brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
  361. brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
  362. brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
  363. brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
  364. brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
  365. brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
  366. brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
  367. brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
  368. brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
  369. brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
  370. brainscore_vision/models/nasnet_large/__init__.py +7 -0
  371. brainscore_vision/models/nasnet_large/model.py +60 -0
  372. brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
  373. brainscore_vision/models/nasnet_large/test.py +8 -0
  374. brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
  375. brainscore_vision/models/nasnet_mobile/model.py +685 -0
  376. brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
  377. brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
  378. brainscore_vision/models/nasnet_mobile/test.py +8 -0
  379. brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
  380. brainscore_vision/models/omnivore_swinB/model.py +79 -0
  381. brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
  382. brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
  383. brainscore_vision/models/omnivore_swinB/test.py +9 -0
  384. brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
  385. brainscore_vision/models/omnivore_swinS/model.py +79 -0
  386. brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
  387. brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
  388. brainscore_vision/models/omnivore_swinS/test.py +9 -0
  389. brainscore_vision/models/pnasnet_large/__init__.py +7 -0
  390. brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
  391. brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
  392. brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
  393. brainscore_vision/models/pnasnet_large/test.py +8 -0
  394. brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
  395. brainscore_vision/models/resnet50_SIN/model.py +63 -0
  396. brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
  397. brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
  398. brainscore_vision/models/resnet50_SIN/test.py +9 -0
  399. brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
  400. brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
  401. brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
  402. brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
  403. brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
  404. brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
  405. brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
  406. brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
  407. brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
  408. brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
  409. brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
  410. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
  411. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
  412. brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
  413. brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
  414. brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
  415. brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
  416. brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
  417. brainscore_vision/models/resnet50_barlow/model.py +53 -0
  418. brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
  419. brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
  420. brainscore_vision/models/resnet50_barlow/test.py +9 -0
  421. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
  422. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
  423. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
  424. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
  425. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
  426. brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
  427. brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
  428. brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
  429. brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
  430. brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
  431. brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
  432. brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
  433. brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
  434. brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
  435. brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
  436. brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
  437. brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
  438. brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
  439. brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
  440. brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
  441. brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
  442. brainscore_vision/models/resnet50_sup/__init__.py +5 -0
  443. brainscore_vision/models/resnet50_sup/model.py +55 -0
  444. brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
  445. brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
  446. brainscore_vision/models/resnet50_sup/test.py +8 -0
  447. brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
  448. brainscore_vision/models/resnet50_vicreg/model.py +62 -0
  449. brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
  450. brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
  451. brainscore_vision/models/resnet50_vicreg/test.py +9 -0
  452. brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
  453. brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
  454. brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
  455. brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
  456. brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
  457. brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
  458. brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
  459. brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
  460. brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
  461. brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
  462. brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
  463. brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
  464. brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
  465. brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
  466. brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
  467. brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
  468. brainscore_vision/models/resnet_101_v1/model.py +42 -0
  469. brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
  470. brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
  471. brainscore_vision/models/resnet_101_v1/test.py +8 -0
  472. brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
  473. brainscore_vision/models/resnet_101_v2/model.py +33 -0
  474. brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
  475. brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
  476. brainscore_vision/models/resnet_101_v2/test.py +8 -0
  477. brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
  478. brainscore_vision/models/resnet_152_v1/model.py +42 -0
  479. brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
  480. brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
  481. brainscore_vision/models/resnet_152_v1/test.py +8 -0
  482. brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
  483. brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
  484. brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
  485. brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
  486. brainscore_vision/models/resnet_152_v2/test.py +8 -0
  487. brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
  488. brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
  489. brainscore_vision/models/resnet_18_test_m/model.py +80 -0
  490. brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
  491. brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
  492. brainscore_vision/models/resnet_18_test_m/test.py +8 -0
  493. brainscore_vision/models/resnet_50_2/__init__.py +9 -0
  494. brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
  495. brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
  496. brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
  497. brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
  498. brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
  499. brainscore_vision/models/resnet_50_2/model.py +46 -0
  500. brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
  501. brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
  502. brainscore_vision/models/resnet_50_2/test.py +8 -0
  503. brainscore_vision/models/resnet_50_robust/model.py +2 -2
  504. brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
  505. brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
  506. brainscore_vision/models/resnet_50_v1/model.py +42 -0
  507. brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
  508. brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
  509. brainscore_vision/models/resnet_50_v1/test.py +8 -0
  510. brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
  511. brainscore_vision/models/resnet_50_v2/model.py +33 -0
  512. brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
  513. brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
  514. brainscore_vision/models/resnet_50_v2/test.py +8 -0
  515. brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
  516. brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
  517. brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
  518. brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
  519. brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
  520. brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
  521. brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
  522. brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
  523. brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
  524. brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
  525. brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
  526. brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
  527. brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
  528. brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
  529. brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
  530. brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
  531. brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
  532. brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
  533. brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
  534. brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
  535. brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
  536. brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
  537. brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
  538. brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
  539. brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
  540. brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
  541. brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
  542. brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
  543. brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
  544. brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
  545. brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
  546. brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
  547. brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
  548. brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
  549. brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
  550. brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
  551. brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
  552. brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
  553. brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
  554. brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
  555. brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
  556. brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
  557. brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
  558. brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
  559. brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
  560. brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
  561. brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
  562. brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
  563. brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
  564. brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
  565. brainscore_vision/models/timm_models/__init__.py +193 -0
  566. brainscore_vision/models/timm_models/model.py +90 -0
  567. brainscore_vision/models/timm_models/model_configs.json +464 -0
  568. brainscore_vision/models/timm_models/requirements.txt +3 -0
  569. brainscore_vision/models/timm_models/test.py +0 -0
  570. brainscore_vision/models/vgg_16/__init__.py +7 -0
  571. brainscore_vision/models/vgg_16/model.py +52 -0
  572. brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
  573. brainscore_vision/models/vgg_16/requirements.txt +1 -0
  574. brainscore_vision/models/vgg_16/test.py +8 -0
  575. brainscore_vision/models/vgg_19/__init__.py +7 -0
  576. brainscore_vision/models/vgg_19/model.py +52 -0
  577. brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
  578. brainscore_vision/models/vgg_19/requirements.txt +1 -0
  579. brainscore_vision/models/vgg_19/test.py +8 -0
  580. brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
  581. brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
  582. brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
  583. brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
  584. brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
  585. brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
  586. brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
  587. brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
  588. brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
  589. brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
  590. brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
  591. brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
  592. brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
  593. brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
  594. brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
  595. brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
  596. brainscore_vision/models/voneresnet_50/__init__.py +7 -0
  597. brainscore_vision/models/voneresnet_50/model.py +37 -0
  598. brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
  599. brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
  600. brainscore_vision/models/voneresnet_50/test.py +8 -0
  601. brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
  602. brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
  603. brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
  604. brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
  605. brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
  606. brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
  607. brainscore_vision/models/voneresnet_50_1/model.py +68 -0
  608. brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
  609. brainscore_vision/models/voneresnet_50_1/test.py +7 -0
  610. brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
  611. brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
  612. brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
  613. brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
  614. brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
  615. brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
  616. brainscore_vision/models/voneresnet_50_3/model.py +66 -0
  617. brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
  618. brainscore_vision/models/voneresnet_50_3/test.py +7 -0
  619. brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
  620. brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
  621. brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
  622. brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
  623. brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
  624. brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
  625. brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
  626. brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
  627. brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
  628. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
  629. brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
  630. brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
  631. brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
  632. brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
  633. brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
  634. brainscore_vision/models/xception/__init__.py +7 -0
  635. brainscore_vision/models/xception/model.py +64 -0
  636. brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
  637. brainscore_vision/models/xception/requirements.txt +2 -0
  638. brainscore_vision/models/xception/test.py +8 -0
  639. brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
  640. brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
  641. brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
  642. brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
  643. brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
  644. brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
  645. brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
  646. brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
  647. brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
  648. brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
  649. brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
  650. brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
  651. brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
  652. brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
  653. brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
  654. brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
  655. brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
  656. brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
  657. brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
  658. brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
  659. brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
  660. brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
  661. brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
  662. brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
  663. brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
  664. brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
  665. brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
  666. brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
  667. brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
  668. brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
  669. brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
  670. brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
  671. brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
  672. brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
  673. brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
  674. brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
  675. brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
  676. brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
  677. brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
  678. brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
  679. brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
  680. brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
  681. brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
  682. brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
  683. brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
  684. brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
  685. brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
  686. brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
  687. brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
  688. brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
  689. brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
  690. brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
  691. brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
  692. brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
  693. brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
  694. brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
  695. brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
  696. brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
  697. brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
  698. brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
  699. brainscore_vision/submission/actions_helpers.py +2 -3
  700. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
  701. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
  702. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
  703. docs/source/index.rst +1 -0
  704. docs/source/modules/submission.rst +1 -1
  705. docs/source/modules/version_bumping.rst +43 -0
  706. tests/test_submission/test_actions_helpers.py +2 -6
  707. brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
  708. brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
  709. brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
  710. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
  711. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
  712. brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
  713. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
  714. brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
  715. /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
  716. /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
  717. /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
  718. /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
  719. /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
  720. /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
  721. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
  722. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,63 @@
+ import functools
+ from torchvision.models import densenet169
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.check_submission import check_models
+
+
+ def get_model(name):
+     """
+     This method fetches an instance of a base model. The instance has to be callable and return a xarray object,
+     containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
+     keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the
+     wrappers.
+     :param name: the name of the model to fetch
+     :return: the model instance
+     """
+     assert name == 'densenet-169'
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     model = densenet169(weights='DEFAULT')
+     wrapper = PytorchWrapper(identifier='densenet-169', model=model, preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     assert name == 'densenet-169'
+     layer_names = (['norm0'] + ['pool0'] +
+                    [f'denseblock1.denselayer{i}' for i in range(1, 7)] + ['transition1.pool'] +
+                    [f'denseblock2.denselayer{i}' for i in range(1, 13)] + ['transition2.pool'] +
+                    [f'denseblock3.denselayer{i}' for i in range(1, 33)] + ['transition3.pool'] +
+                    [f'denseblock4.denselayer{i}' for i in range(1, 33)])
+     layer_names = [f"features.{name}" for name in layer_names]
+     layer_names += ['features.norm5']
+     return layer_names
+
+
+ def get_bibtex(model_identifier):
+     """
+     A method returning the bibtex reference of the requested model as a string.
+     """
+     return '''
+     @article{DBLP:journals/corr/HuangLW16a,
+       author = {Gao Huang and
+                 Zhuang Liu and
+                 Kilian Q. Weinberger},
+       title = {Densely Connected Convolutional Networks},
+       journal = {CoRR},
+       volume = {abs/1608.06993},
+       year = {2016},
+       url = {http://arxiv.org/abs/1608.06993},
+       eprinttype = {arXiv},
+       eprint = {1608.06993},
+       timestamp = {Mon, 10 Sep 2018 15:49:32 +0200},
+       biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib},
+       bibsource = {dblp computer science bibliography, https://dblp.org}
+     }
+     '''
+
+
+ if __name__ == '__main__':
+     # Use this method to ensure the correctness of the BaseModel implementations.
+     # It executes a mock run of brain-score benchmarks.
+     check_models.check_base_models(__name__)
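The layer list in the new densenet-169 model.py enumerates torchvision module names. As an illustrative sanity check only (not part of this diff; it assumes torchvision is installed and that get_layers from the file above is in scope), each name should resolve to a submodule of densenet169:

    # Hypothetical check, not shipped with the package: every listed layer should be a real module.
    from torchvision.models import densenet169
    model = densenet169(weights='DEFAULT')
    modules = dict(model.named_modules())
    for layer in get_layers('densenet-169'):
        assert layer in modules, f"unknown layer: {layer}"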
@@ -0,0 +1 @@
+ {"V4": "features.transition2.pool", "IT": "features.transition3.pool", "V2": "features.transition2.pool", "V1": "features.transition1.pool"}
@@ -0,0 +1 @@
+ torchvision
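The accompanying region_layer_map.json pins each visual region (V1, V2, V4, IT) to one of the layers listed above. How brainscore_vision consumes this file internally is not shown in this diff; purely as an illustration, it reads like any JSON config:

    # Illustration only: reading the committed region-to-layer mapping.
    import json
    with open('region_layer_map.json') as f:
        region_layer_map = json.load(f)
    print(region_layer_map['IT'])  # 'features.transition3.pool'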
@@ -0,0 +1,9 @@
+ import brainscore_vision
+ import pytest
+
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('densenet-169')
+     assert model.identifier == 'densenet-169'
@@ -2,6 +2,6 @@ from brainscore_vision import model_registry
  from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
  from .model import get_model, get_layers
 
- model_registry['densenet_201_pytorch'] = lambda: ModelCommitment(identifier='densenet_201_pytorch',
-                                                                  activations_model=get_model('densenet_201_pytorch'),
-                                                                  layers=get_layers('densenet_201_pytorch'))
+ model_registry['densenet-201'] = lambda: ModelCommitment(identifier='densenet-201',
+                                                          activations_model=get_model('densenet-201'),
+                                                          layers=get_layers('densenet-201'))
@@ -8,7 +8,7 @@ from brainscore_vision.model_helpers.check_submission import check_models
  ssl._create_default_https_context = ssl._create_unverified_context
 
  '''
- This is a Pytorch implementation of densenet_201.
+ This is a Pytorch implementation of densenet-201.
 
  Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via:
  https://huggingface.co/timm/densenet201.tv_in1k
@@ -23,9 +23,9 @@ MODEL = timm.create_model('densenet201.tv_in1k', pretrained=True)
 
 
  def get_model(name):
-     assert name == 'densenet_201_pytorch'
+     assert name == 'densenet-201'
      preprocessing = functools.partial(load_preprocess_images, image_size=224)
-     wrapper = PytorchWrapper(identifier='densenet_201_pytorch', model=MODEL,
+     wrapper = PytorchWrapper(identifier='densenet-201', model=MODEL,
                               preprocessing=preprocessing,
                               batch_size=4)  # doesn't fit into 12 GB GPU memory otherwise
      wrapper.image_size = 224
@@ -33,13 +33,15 @@ def get_model(name):
 
 
  def get_layers(name):
-     assert name == 'densenet_201_pytorch'
-     layer_names = []
-
-     for name, module in MODEL.named_modules():
-         layer_names.append(name)
-
-     return layer_names[2:]
+     assert name == 'densenet-201'
+     layer_names = (['norm0.act'] + ['pool0'] +
+                    [f'denseblock1.denselayer{i}' for i in range(1, 7)] + ['transition1.pool'] +
+                    [f'denseblock2.denselayer{i}' for i in range(1, 13)] + ['transition2.pool'] +
+                    [f'denseblock3.denselayer{i}' for i in range(1, 49)] + ['transition3.pool'] +
+                    [f'denseblock4.denselayer{i}' for i in range(1, 33)])
+     layer_names = [f"features.{name}" for name in layer_names]
+     layer_names += ['global_pool']
+     return layer_names
 
 
  def get_bibtex(model_identifier):
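The rewritten get_layers replaces the old blanket enumeration of every named module with a curated list of block outputs. As a rough illustration of the difference (an assumption-labelled sketch, not part of the diff; it requires timm and the get_layers defined above), one can compare the two counts:

    # Sketch: the old implementation returned all of model.named_modules() (hundreds of entries),
    # while the new list keeps only the dense-block and transition outputs plus global_pool.
    import timm
    m = timm.create_model('densenet201.tv_in1k', pretrained=False)
    print(len(list(m.named_modules())))      # roughly what the old get_layers enumerated
    print(len(get_layers('densenet-201')))   # the curated list in the new implementation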
@@ -0,0 +1,6 @@
+ {
+     "V1": "features.transition1.pool",
+     "V2": "features.transition2.pool",
+     "V4": "features.transition2.pool",
+     "IT": "features.transition3.pool"
+ }
@@ -0,0 +1,8 @@
+ import pytest
+ import brainscore_vision
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('densenet-201')
+     assert model.identifier == 'densenet-201'
@@ -0,0 +1,7 @@
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['efficientnet_b0'] = lambda: ModelCommitment(identifier='efficientnet_b0',
+                                                             activations_model=get_model('efficientnet_b0'),
+                                                             layers=get_layers('efficientnet_b0'))
@@ -0,0 +1,45 @@
+ import functools
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.check_submission import check_models
+ import torchvision.models
+
+ import ssl
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+ def get_model(name):
+     assert name == 'efficientnet_b0'
+     model = torchvision.models.efficientnet_b0(pretrained=True)
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = PytorchWrapper(identifier='efficientnet_b0', model=model, preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
+
+ def get_layers(name):
+     assert name == 'efficientnet_b0'
+     return ['features.0.2',
+             'features.2.1.stochastic_depth',
+             'features.3.1.stochastic_depth',
+             'features.4.1.stochastic_depth',
+             'features.4.2.stochastic_depth',
+             'features.5.1.stochastic_depth',
+             'features.5.2.stochastic_depth',
+             'features.6.1.stochastic_depth',
+             'features.6.2.stochastic_depth',
+             'features.8.2','classifier.0','classifier.1',
+             ]
+
+
+ def get_bibtex(model_identifier):
+     return """@misc{tan2020efficientnet,
+       title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
+       author={Mingxing Tan and Quoc V. Le},
+       year={2020},
+       eprint={1905.11946},
+       archivePrefix={arXiv},
+       primaryClass={cs.LG}
+     }"""
+
+
+ if __name__ == '__main__':
+     check_models.check_base_models(__name__)
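The efficientnet_b0 layer list points at torchvision submodules such as 'features.4.1.stochastic_depth'. As a minimal sketch of what reading activations from such a layer amounts to (illustrative only, assuming a recent torchvision; this is not how the package itself extracts activations), a plain forward hook suffices:

    # Sketch: capture one listed layer's activation with a forward hook.
    import torch
    import torchvision
    model = torchvision.models.efficientnet_b0(weights='DEFAULT').eval()
    acts = {}
    layer = dict(model.named_modules())['features.4.1.stochastic_depth']
    handle = layer.register_forward_hook(lambda module, inputs, output: acts.update(act=output))
    with torch.no_grad():
        model(torch.rand(1, 3, 224, 224))
    handle.remove()
    print(acts['act'].shape)  # batch x channels x height x width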
@@ -0,0 +1 @@
+ {"IT": "features.6.1.stochastic_depth", "V4": "features.4.1.stochastic_depth", "V1": "features.3.1.stochastic_depth", "V2": "features.6.1.stochastic_depth"}
@@ -0,0 +1,2 @@
+ torch
+ torchvision
@@ -0,0 +1,8 @@
+ import pytest
+ import brainscore_vision
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('efficientnet_b0')
+     assert model.identifier == 'efficientnet_b0'
@@ -0,0 +1,7 @@
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from brainscore_vision import model_registry
+ from .model import get_layers,get_model
+
+
+ model_registry['efficientnet-b7'] = \
+     lambda: ModelCommitment(identifier='efficientnet-b7', activations_model=get_model('efficientnet-b7'), layers=get_layers('efficientnet-b7'))
@@ -0,0 +1,61 @@
+ import functools
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.check_submission import check_models
+ from types import MethodType
+ from transformers import AutoModelForImageClassification
+
+
+ model = AutoModelForImageClassification.from_pretrained("google/efficientnet-b7")
+ def get_model(name):
+     """
+     This method fetches an instance of a base model. The instance has to be callable and return a xarray object,
+     containing activations. There exist standard wrapper implementations for common libraries, like pytorch and
+     keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the
+     wrappers.
+     :param name: the name of the model to fetch
+     :return: the model instance
+     """
+     assert name == 'efficientnet-b7'
+
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+     def _output_layer(self):
+         return self._model._fc
+     wrapper._output_layer = MethodType(_output_layer, wrapper)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     assert name == 'efficientnet-b7'
+     layers = [f'efficientnet.encoder.blocks.{i}' for i in range(55)]
+     return layers
+
+
+ def get_bibtex(name):
+     """
+     A method returning the bibtex reference of the requested model as a string.
+     """
+     return '''
+     @article{DBLP:journals/corr/abs-1905-11946,
+       author = {Mingxing Tan and
+                 Quoc V. Le},
+       title = {EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
+       journal = {CoRR},
+       volume = {abs/1905.11946},
+       year = {2019},
+       url = {http://arxiv.org/abs/1905.11946},
+       eprinttype = {arXiv},
+       eprint = {1905.11946},
+       timestamp = {Mon, 03 Jun 2019 13:42:33 +0200},
+       biburl = {https://dblp.org/rec/journals/corr/abs-1905-11946.bib},
+       bibsource = {dblp computer science bibliography, https://dblp.org}
+     }
+     '''
+
+
+ if __name__ == '__main__':
+     # Use this method to ensure the correctness of the BaseModel implementations.
+     # It executes a mock run of brain-score benchmarks.
+     check_models.check_base_models(__name__)
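get_layers for efficientnet-b7 assumes the Hugging Face checkpoint exposes its MBConv blocks under efficientnet.encoder.blocks. As a hedged, illustrative check (the module path and the count of 55 are taken from the code above, not independently verified here), the names can be listed directly from the loaded model:

    # Illustration: enumerate the encoder block modules that get_layers refers to.
    from transformers import AutoModelForImageClassification
    hf_model = AutoModelForImageClassification.from_pretrained("google/efficientnet-b7")
    block_names = [name for name, _ in hf_model.named_modules()
                   if name.startswith("efficientnet.encoder.blocks.") and name.count(".") == 3]
    print(len(block_names))  # expected to match range(55) in get_layers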
@@ -0,0 +1 @@
+ {"IT": "_blocks.38", "V1": "_blocks.11", "V4": "_blocks.28", "V2": "_blocks.18"}
@@ -0,0 +1 @@
+ transformers
@@ -0,0 +1,9 @@
+ import brainscore_vision
+ import pytest
+
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('efficientnet-b7')
+     assert model.identifier == 'efficientnet-b7'
@@ -65,9 +65,9 @@ def get_model(name):
      assert name == 'effnetb1_cutmix_augmix_sam_e1_5avg_424x377'
      model_tf_efficientnet_b1_ns = EffNetBX()
 
-     weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                      relative_path="effnetb1_cutmix_augmix_sam_e1_5avg_424x377/weights1_5_avg.pth",
-                                     version_id="EqB6P7BittVdkgRd3oMncq_j9AAdiYvz",
+                                     version_id="null",
                                      sha1="871bd10e6ce164bfe8f3ce10bb77a69d326d7b65")
      model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))["model"])
      model = model_tf_efficientnet_b1_ns.efnet_model
@@ -77,9 +77,9 @@ def get_model(name):
      assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'
      model_tf_efficientnet_b1_ns= EffNetBX()
 
-     weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                      relative_path="effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_SAM_e6e8e9e10.pth",
-                                     version_id="prSgvyJFh_c7OKQODIEqU_c_hg_YXh5M",
+                                     version_id="null",
                                      sha1="9d60e49043b2d5354447c46cd011764cc6cf094e")
      model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))["model"])
      model = model_tf_efficientnet_b1_ns.efnet_model
@@ -70,9 +70,9 @@ class EffNetBX(nn.Module):
  def get_model(name):
      assert name == 'effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288'
      model_tf_efficientnet_b1_ns = EffNetBX()
-     weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                      relative_path="effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_augmix_e4toe7.pth",
-                                     version_id="iB0UqbguDpYHD0HRbMt1F1er3c414yWr",
+                                     version_id="null",
                                      sha1="37f3ac1b14e80cfaa99fa5f412c1e132480ed5b6")
 
      model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path,map_location=torch.device('cpu'))["model"])