brainscore-vision 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl

Files changed (722)
  1. brainscore_vision/data/baker2022/__init__.py +10 -10
  2. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +2 -2
  3. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +2 -2
  4. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +2 -2
  5. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +2 -2
  6. brainscore_vision/data/barbumayo2019/__init__.py +3 -3
  7. brainscore_vision/data/bashivankar2019/__init__.py +10 -10
  8. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +2 -2
  9. brainscore_vision/data/bmd2024/__init__.py +20 -20
  10. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +2 -1
  11. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +2 -1
  12. brainscore_vision/data/bracci2019/__init__.py +5 -5
  13. brainscore_vision/data/bracci2019/data_packaging.py +1 -1
  14. brainscore_vision/data/cadena2017/__init__.py +5 -5
  15. brainscore_vision/data/cichy2019/__init__.py +5 -5
  16. brainscore_vision/data/coggan2024_behavior/__init__.py +8 -8
  17. brainscore_vision/data/coggan2024_behavior/data_packaging.py +2 -2
  18. brainscore_vision/data/coggan2024_fMRI/__init__.py +5 -6
  19. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +2 -2
  20. brainscore_vision/data/david2004/__init__.py +5 -5
  21. brainscore_vision/data/deng2009/__init__.py +3 -3
  22. brainscore_vision/data/ferguson2024/__init__.py +112 -112
  23. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +2 -2
  24. brainscore_vision/data/freemanziemba2013/__init__.py +31 -30
  25. brainscore_vision/data/geirhos2021/__init__.py +85 -85
  26. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +2 -2
  27. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +2 -2
  28. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +1 -1
  29. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +1 -1
  30. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +1 -1
  31. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +1 -1
  32. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +2 -2
  33. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +2 -2
  34. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +1 -1
  35. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +1 -1
  36. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +1 -1
  37. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +1 -1
  38. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +1 -1
  39. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +1 -1
  40. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +1 -1
  41. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +1 -1
  42. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +1 -1
  43. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +1 -1
  44. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +1 -1
  45. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +1 -1
  46. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +1 -1
  47. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +1 -1
  48. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +1 -1
  49. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +1 -1
  50. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +1 -1
  51. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +1 -1
  52. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +1 -1
  53. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +1 -1
  54. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +1 -1
  55. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +1 -1
  56. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +1 -1
  57. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +1 -1
  58. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +1 -1
  59. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +1 -1
  60. brainscore_vision/data/hebart2023/__init__.py +5 -5
  61. brainscore_vision/data/hebart2023/packaging/data_assembly.py +2 -2
  62. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +1 -1
  63. brainscore_vision/data/hendrycks2019/__init__.py +12 -12
  64. brainscore_vision/data/igustibagus2024/__init__.py +5 -5
  65. brainscore_vision/data/imagenetslim15000/__init__.py +3 -3
  66. brainscore_vision/data/islam2021/__init__.py +3 -3
  67. brainscore_vision/data/kar2018/__init__.py +7 -7
  68. brainscore_vision/data/kar2019/__init__.py +5 -5
  69. brainscore_vision/data/kuzovkin2018/__init__.py +5 -5
  70. brainscore_vision/data/lonnqvist2024/__init__.py +12 -12
  71. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +1 -1
  72. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +1 -1
  73. brainscore_vision/data/majajhong2015/__init__.py +23 -23
  74. brainscore_vision/data/malania2007/__init__.py +77 -77
  75. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +1 -1
  76. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +1 -1
  77. brainscore_vision/data/maniquet2024/__init__.py +11 -11
  78. brainscore_vision/data/marques2020/__init__.py +30 -30
  79. brainscore_vision/data/rajalingham2018/__init__.py +10 -10
  80. brainscore_vision/data/rajalingham2020/__init__.py +5 -5
  81. brainscore_vision/data/rust2012/__init__.py +7 -7
  82. brainscore_vision/data/sanghavi2020/__init__.py +19 -19
  83. brainscore_vision/data/scialom2024/__init__.py +110 -110
  84. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +1 -1
  85. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +1 -1
  86. brainscore_vision/data/seibert2019/__init__.py +2 -2
  87. brainscore_vision/data/zhang2018/__init__.py +5 -5
  88. brainscore_vision/data_helpers/s3.py +25 -6
  89. brainscore_vision/model_helpers/activations/pytorch.py +34 -12
  90. brainscore_vision/models/AT_efficientnet_b2/__init__.py +7 -0
  91. brainscore_vision/models/AT_efficientnet_b2/model.py +58 -0
  92. brainscore_vision/models/AT_efficientnet_b2/region_layer_map/AT_efficientnet-b2.json +6 -0
  93. brainscore_vision/models/AT_efficientnet_b2/requirements.txt +1 -0
  94. brainscore_vision/models/AT_efficientnet_b2/test.py +8 -0
  95. brainscore_vision/models/AdvProp_efficientnet_b2/__init__.py +7 -0
  96. brainscore_vision/models/AdvProp_efficientnet_b2/model.py +64 -0
  97. brainscore_vision/models/AdvProp_efficientnet_b2/region_layer_map/AdvProp_efficientnet-b2.json +1 -0
  98. brainscore_vision/models/AdvProp_efficientnet_b2/requirements.txt +1 -0
  99. brainscore_vision/models/AdvProp_efficientnet_b2/test.py +8 -0
  100. brainscore_vision/models/AdvProp_efficientnet_b4/__init__.py +5 -0
  101. brainscore_vision/models/AdvProp_efficientnet_b4/model.py +65 -0
  102. brainscore_vision/models/AdvProp_efficientnet_b4/region_layer_map/AdvProp_efficientnet-b4.json +1 -0
  103. brainscore_vision/models/AdvProp_efficientnet_b4/requirements.txt +1 -0
  104. brainscore_vision/models/AdvProp_efficientnet_b4/test.py +8 -0
  105. brainscore_vision/models/AdvProp_efficientnet_b7/__init__.py +5 -0
  106. brainscore_vision/models/AdvProp_efficientnet_b7/model.py +65 -0
  107. brainscore_vision/models/AdvProp_efficientnet_b7/region_layer_map/AdvProp_efficientnet-b7.json +1 -0
  108. brainscore_vision/models/AdvProp_efficientnet_b7/requirements.txt +1 -0
  109. brainscore_vision/models/AdvProp_efficientnet_b7/test.py +8 -0
  110. brainscore_vision/models/AdvProp_efficientnet_b8/__init__.py +7 -0
  111. brainscore_vision/models/AdvProp_efficientnet_b8/model.py +65 -0
  112. brainscore_vision/models/AdvProp_efficientnet_b8/region_layer_map/AdvProp_efficientnet-b8.json +1 -0
  113. brainscore_vision/models/AdvProp_efficientnet_b8/requirements.txt +1 -0
  114. brainscore_vision/models/AdvProp_efficientnet_b8/test.py +8 -0
  115. brainscore_vision/models/BiT_S_R101x1/__init__.py +7 -0
  116. brainscore_vision/models/BiT_S_R101x1/model.py +223 -0
  117. brainscore_vision/models/BiT_S_R101x1/region_layer_map/BiT-S-R101x1.json +1 -0
  118. brainscore_vision/models/BiT_S_R101x1/requirements.txt +4 -0
  119. brainscore_vision/models/BiT_S_R101x1/test.py +8 -0
  120. brainscore_vision/models/BiT_S_R101x3/__init__.py +7 -0
  121. brainscore_vision/models/BiT_S_R101x3/model.py +225 -0
  122. brainscore_vision/models/BiT_S_R101x3/region_layer_map/BiT-S-R101x3.json +1 -0
  123. brainscore_vision/models/BiT_S_R101x3/requirements.txt +4 -0
  124. brainscore_vision/models/BiT_S_R101x3/test.py +8 -0
  125. brainscore_vision/models/BiT_S_R152x2/__init__.py +7 -0
  126. brainscore_vision/models/BiT_S_R152x2/model.py +231 -0
  127. brainscore_vision/models/BiT_S_R152x2/region_layer_map/BiT-S-R152x2.json +1 -0
  128. brainscore_vision/models/BiT_S_R152x2/requirements.txt +4 -0
  129. brainscore_vision/models/BiT_S_R152x2/test.py +8 -0
  130. brainscore_vision/models/BiT_S_R152x4/__init__.py +7 -0
  131. brainscore_vision/models/BiT_S_R152x4/model.py +231 -0
  132. brainscore_vision/models/BiT_S_R152x4/region_layer_map/BiT-S-R152x4.json +1 -0
  133. brainscore_vision/models/BiT_S_R152x4/requirements.txt +4 -0
  134. brainscore_vision/models/BiT_S_R152x4/test.py +8 -0
  135. brainscore_vision/models/BiT_S_R50x1/__init__.py +7 -0
  136. brainscore_vision/models/BiT_S_R50x1/model.py +218 -0
  137. brainscore_vision/models/BiT_S_R50x1/region_layer_map/BiT-S-R50x1.json +1 -0
  138. brainscore_vision/models/BiT_S_R50x1/requirements.txt +4 -0
  139. brainscore_vision/models/BiT_S_R50x1/test.py +8 -0
  140. brainscore_vision/models/BiT_S_R50x3/__init__.py +7 -0
  141. brainscore_vision/models/BiT_S_R50x3/model.py +217 -0
  142. brainscore_vision/models/BiT_S_R50x3/region_layer_map/BiT-S-R50x3.json +1 -0
  143. brainscore_vision/models/BiT_S_R50x3/requirements.txt +4 -0
  144. brainscore_vision/models/BiT_S_R50x3/test.py +8 -0
  145. brainscore_vision/models/ReAlnet/__init__.py +64 -0
  146. brainscore_vision/models/ReAlnet/model.py +237 -0
  147. brainscore_vision/models/ReAlnet/requirements.txt +7 -0
  148. brainscore_vision/models/ReAlnet/test.py +0 -0
  149. brainscore_vision/models/ReAlnet/weights.json +26 -0
  150. brainscore_vision/models/ReAlnet_cornet/__init__.py +46 -0
  151. brainscore_vision/models/ReAlnet_cornet/helpers/helpers.py +215 -0
  152. brainscore_vision/models/ReAlnet_cornet/model.py +69 -0
  153. brainscore_vision/models/ReAlnet_cornet/requirements.txt +8 -0
  154. brainscore_vision/models/ReAlnet_cornet/test.py +0 -0
  155. brainscore_vision/models/Res2Net50_26w_4s/__init__.py +5 -0
  156. brainscore_vision/models/Res2Net50_26w_4s/helpers/resnet_helpers.py +161 -0
  157. brainscore_vision/models/Res2Net50_26w_4s/model.py +75 -0
  158. brainscore_vision/models/Res2Net50_26w_4s/region_layer_map/Res2Net50_26w_4s.json +1 -0
  159. brainscore_vision/models/Res2Net50_26w_4s/requirements.txt +1 -0
  160. brainscore_vision/models/Res2Net50_26w_4s/test.py +8 -0
  161. brainscore_vision/models/VOneCORnet_S/__init__.py +9 -0
  162. brainscore_vision/models/VOneCORnet_S/helpers/cornet_helpers.py +34 -0
  163. brainscore_vision/models/VOneCORnet_S/helpers/cornet_s_helpers.py +128 -0
  164. brainscore_vision/models/VOneCORnet_S/helpers/cornets.py +136 -0
  165. brainscore_vision/models/VOneCORnet_S/helpers/vonecornets.py +38 -0
  166. brainscore_vision/models/VOneCORnet_S/model.py +25 -0
  167. brainscore_vision/models/VOneCORnet_S/requirements.txt +1 -0
  168. brainscore_vision/models/VOneCORnet_S/test.py +8 -0
  169. brainscore_vision/models/alexnet_training_seed_01/__init__.py +6 -0
  170. brainscore_vision/models/alexnet_training_seed_01/model.py +140 -0
  171. brainscore_vision/models/alexnet_training_seed_01/region_layer_map/alexnet_training_seed_01.json +6 -0
  172. brainscore_vision/models/alexnet_training_seed_01/requirements.txt +3 -0
  173. brainscore_vision/models/alexnet_training_seed_01/test.py +9 -0
  174. brainscore_vision/models/alexnet_training_seed_02/__init__.py +6 -0
  175. brainscore_vision/models/alexnet_training_seed_02/model.py +140 -0
  176. brainscore_vision/models/alexnet_training_seed_02/region_layer_map/alexnet_training_seed_02.json +6 -0
  177. brainscore_vision/models/alexnet_training_seed_02/requirements.txt +3 -0
  178. brainscore_vision/models/alexnet_training_seed_02/test.py +9 -0
  179. brainscore_vision/models/alexnet_training_seed_03/__init__.py +6 -0
  180. brainscore_vision/models/alexnet_training_seed_03/model.py +140 -0
  181. brainscore_vision/models/alexnet_training_seed_03/region_layer_map/alexnet_training_seed_03.json +6 -0
  182. brainscore_vision/models/alexnet_training_seed_03/requirements.txt +3 -0
  183. brainscore_vision/models/alexnet_training_seed_03/test.py +9 -0
  184. brainscore_vision/models/alexnet_training_seed_04/__init__.py +6 -0
  185. brainscore_vision/models/alexnet_training_seed_04/model.py +140 -0
  186. brainscore_vision/models/alexnet_training_seed_04/region_layer_map/alexnet_training_seed_04.json +6 -0
  187. brainscore_vision/models/alexnet_training_seed_04/requirements.txt +3 -0
  188. brainscore_vision/models/alexnet_training_seed_04/test.py +9 -0
  189. brainscore_vision/models/alexnet_training_seed_05/__init__.py +6 -0
  190. brainscore_vision/models/alexnet_training_seed_05/model.py +140 -0
  191. brainscore_vision/models/alexnet_training_seed_05/region_layer_map/alexnet_training_seed_05.json +6 -0
  192. brainscore_vision/models/alexnet_training_seed_05/requirements.txt +3 -0
  193. brainscore_vision/models/alexnet_training_seed_05/test.py +9 -0
  194. brainscore_vision/models/alexnet_training_seed_06/__init__.py +6 -0
  195. brainscore_vision/models/alexnet_training_seed_06/model.py +140 -0
  196. brainscore_vision/models/alexnet_training_seed_06/region_layer_map/alexnet_training_seed_06.json +6 -0
  197. brainscore_vision/models/alexnet_training_seed_06/requirements.txt +3 -0
  198. brainscore_vision/models/alexnet_training_seed_06/test.py +9 -0
  199. brainscore_vision/models/alexnet_training_seed_07/__init__.py +6 -0
  200. brainscore_vision/models/alexnet_training_seed_07/model.py +140 -0
  201. brainscore_vision/models/alexnet_training_seed_07/region_layer_map/alexnet_training_seed_07.json +6 -0
  202. brainscore_vision/models/alexnet_training_seed_07/requirements.txt +3 -0
  203. brainscore_vision/models/alexnet_training_seed_07/test.py +9 -0
  204. brainscore_vision/models/alexnet_training_seed_08/__init__.py +6 -0
  205. brainscore_vision/models/alexnet_training_seed_08/model.py +140 -0
  206. brainscore_vision/models/alexnet_training_seed_08/region_layer_map/alexnet_training_seed_08.json +6 -0
  207. brainscore_vision/models/alexnet_training_seed_08/requirements.txt +3 -0
  208. brainscore_vision/models/alexnet_training_seed_08/test.py +9 -0
  209. brainscore_vision/models/alexnet_training_seed_09/__init__.py +6 -0
  210. brainscore_vision/models/alexnet_training_seed_09/model.py +140 -0
  211. brainscore_vision/models/alexnet_training_seed_09/region_layer_map/alexnet_training_seed_09.json +6 -0
  212. brainscore_vision/models/alexnet_training_seed_09/requirements.txt +3 -0
  213. brainscore_vision/models/alexnet_training_seed_09/test.py +9 -0
  214. brainscore_vision/models/alexnet_training_seed_10/__init__.py +6 -0
  215. brainscore_vision/models/alexnet_training_seed_10/model.py +140 -0
  216. brainscore_vision/models/alexnet_training_seed_10/region_layer_map/alexnet_training_seed_10.json +6 -0
  217. brainscore_vision/models/alexnet_training_seed_10/requirements.txt +3 -0
  218. brainscore_vision/models/alexnet_training_seed_10/test.py +9 -0
  219. brainscore_vision/models/antialiased-r50/__init__.py +7 -0
  220. brainscore_vision/models/antialiased-r50/model.py +62 -0
  221. brainscore_vision/models/antialiased-r50/region_layer_map/antialiased-r50.json +1 -0
  222. brainscore_vision/models/antialiased-r50/requirements.txt +3 -0
  223. brainscore_vision/models/antialiased-r50/test.py +8 -0
  224. brainscore_vision/models/convnext_tiny_sup/__init__.py +8 -0
  225. brainscore_vision/models/convnext_tiny_sup/model.py +56 -0
  226. brainscore_vision/models/convnext_tiny_sup/region_layer_map/convnext_tiny_sup.json +1 -0
  227. brainscore_vision/models/convnext_tiny_sup/requirements.txt +1 -0
  228. brainscore_vision/models/convnext_tiny_sup/test.py +8 -0
  229. brainscore_vision/models/cornet_s/model.py +2 -2
  230. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +2 -2
  231. brainscore_vision/models/densenet_121/__init__.py +7 -0
  232. brainscore_vision/models/densenet_121/model.py +63 -0
  233. brainscore_vision/models/densenet_121/region_layer_map/densenet-121.json +1 -0
  234. brainscore_vision/models/densenet_121/requirements.txt +1 -0
  235. brainscore_vision/models/densenet_121/test.py +8 -0
  236. brainscore_vision/models/densenet_169/__init__.py +7 -0
  237. brainscore_vision/models/densenet_169/model.py +63 -0
  238. brainscore_vision/models/densenet_169/region_layer_map/densenet-169.json +1 -0
  239. brainscore_vision/models/densenet_169/requirements.txt +1 -0
  240. brainscore_vision/models/densenet_169/test.py +9 -0
  241. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/__init__.py +3 -3
  242. brainscore_vision/models/{densenet_201_pytorch → densenet_201}/model.py +12 -10
  243. brainscore_vision/models/densenet_201/region_layer_map/densenet-201.json +6 -0
  244. brainscore_vision/models/densenet_201/test.py +8 -0
  245. brainscore_vision/models/efficientnet_b0/__init__.py +7 -0
  246. brainscore_vision/models/efficientnet_b0/model.py +45 -0
  247. brainscore_vision/models/efficientnet_b0/region_layer_map/efficientnet_b0.json +1 -0
  248. brainscore_vision/models/efficientnet_b0/requirements.txt +2 -0
  249. brainscore_vision/models/efficientnet_b0/test.py +8 -0
  250. brainscore_vision/models/efficientnet_b7/__init__.py +7 -0
  251. brainscore_vision/models/efficientnet_b7/model.py +61 -0
  252. brainscore_vision/models/efficientnet_b7/region_layer_map/efficientnet-b7.json +1 -0
  253. brainscore_vision/models/efficientnet_b7/requirements.txt +1 -0
  254. brainscore_vision/models/efficientnet_b7/test.py +9 -0
  255. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +2 -2
  256. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -142
  257. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +2 -2
  258. brainscore_vision/models/evresnet_50_1/__init__.py +12 -0
  259. brainscore_vision/models/evresnet_50_1/evnet/backends.py +109 -0
  260. brainscore_vision/models/evresnet_50_1/evnet/evnet.py +147 -0
  261. brainscore_vision/models/evresnet_50_1/evnet/modules.py +308 -0
  262. brainscore_vision/models/evresnet_50_1/evnet/params.py +326 -0
  263. brainscore_vision/models/evresnet_50_1/evnet/utils.py +142 -0
  264. brainscore_vision/models/evresnet_50_1/model.py +62 -0
  265. brainscore_vision/models/evresnet_50_1/requirements.txt +5 -0
  266. brainscore_vision/models/evresnet_50_1/test.py +8 -0
  267. brainscore_vision/models/evresnet_50_4/__init__.py +12 -0
  268. brainscore_vision/models/evresnet_50_4/evnet/backends.py +109 -0
  269. brainscore_vision/models/evresnet_50_4/evnet/evnet.py +147 -0
  270. brainscore_vision/models/evresnet_50_4/evnet/modules.py +308 -0
  271. brainscore_vision/models/evresnet_50_4/evnet/params.py +326 -0
  272. brainscore_vision/models/evresnet_50_4/evnet/utils.py +142 -0
  273. brainscore_vision/models/evresnet_50_4/model.py +67 -0
  274. brainscore_vision/models/evresnet_50_4/requirements.txt +4 -0
  275. brainscore_vision/models/evresnet_50_4/test.py +8 -0
  276. brainscore_vision/models/evresnet_50_4_no_mapping/__init__.py +10 -0
  277. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/backends.py +109 -0
  278. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/evnet.py +147 -0
  279. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py +308 -0
  280. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/params.py +326 -0
  281. brainscore_vision/models/evresnet_50_4_no_mapping/evnet/utils.py +142 -0
  282. brainscore_vision/models/evresnet_50_4_no_mapping/model.py +67 -0
  283. brainscore_vision/models/evresnet_50_4_no_mapping/region_layer_map/evresnet_50_4_no_mapping.json +6 -0
  284. brainscore_vision/models/evresnet_50_4_no_mapping/requirements.txt +4 -0
  285. brainscore_vision/models/evresnet_50_4_no_mapping/test.py +8 -0
  286. brainscore_vision/models/grcnn/__init__.py +7 -0
  287. brainscore_vision/models/grcnn/helpers/helpers.py +236 -0
  288. brainscore_vision/models/grcnn/model.py +54 -0
  289. brainscore_vision/models/grcnn/region_layer_map/grcnn.json +1 -0
  290. brainscore_vision/models/grcnn/requirements.txt +2 -0
  291. brainscore_vision/models/grcnn/test.py +9 -0
  292. brainscore_vision/models/grcnn_109/__init__.py +5 -0
  293. brainscore_vision/models/grcnn_109/helpers/helpers.py +237 -0
  294. brainscore_vision/models/grcnn_109/model.py +53 -0
  295. brainscore_vision/models/grcnn_109/region_layer_map/grcnn_109.json +1 -0
  296. brainscore_vision/models/grcnn_109/requirements.txt +2 -0
  297. brainscore_vision/models/grcnn_109/test.py +9 -0
  298. brainscore_vision/models/hmax/model.py +2 -2
  299. brainscore_vision/models/imagenet_l2_3_0/__init__.py +9 -0
  300. brainscore_vision/models/imagenet_l2_3_0/model.py +101 -0
  301. brainscore_vision/models/imagenet_l2_3_0/region_layer_map/imagenet_l2_3_0.json +1 -0
  302. brainscore_vision/models/imagenet_l2_3_0/requirements.txt +2 -0
  303. brainscore_vision/models/imagenet_l2_3_0/test.py +8 -0
  304. brainscore_vision/models/inception_v1/__init__.py +7 -0
  305. brainscore_vision/models/inception_v1/model.py +67 -0
  306. brainscore_vision/models/inception_v1/requirements.txt +1 -0
  307. brainscore_vision/models/inception_v1/test.py +8 -0
  308. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/__init__.py +3 -3
  309. brainscore_vision/models/{inception_v3_pytorch → inception_v3}/model.py +10 -10
  310. brainscore_vision/models/inception_v3/region_layer_map/inception_v3.json +6 -0
  311. brainscore_vision/models/inception_v3/test.py +8 -0
  312. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/__init__.py +3 -3
  313. brainscore_vision/models/{inception_v4_pytorch → inception_v4}/model.py +8 -15
  314. brainscore_vision/models/inception_v4/region_layer_map/inception_v4.json +6 -0
  315. brainscore_vision/models/inception_v4/test.py +8 -0
  316. brainscore_vision/models/mobilenet_v2_0_5_192/__init__.py +7 -0
  317. brainscore_vision/models/mobilenet_v2_0_5_192/model.py +83 -0
  318. brainscore_vision/models/mobilenet_v2_0_5_192/region_layer_map/mobilenet_v2_0_5_192.json +6 -0
  319. brainscore_vision/models/mobilenet_v2_0_5_192/requirements.txt +2 -0
  320. brainscore_vision/models/mobilenet_v2_0_5_192/test.py +8 -0
  321. brainscore_vision/models/mobilenet_v2_0_5_224/__init__.py +7 -0
  322. brainscore_vision/models/mobilenet_v2_0_5_224/model.py +73 -0
  323. brainscore_vision/models/mobilenet_v2_0_5_224/region_layer_map/mobilenet_v2_0_5_224.json +6 -0
  324. brainscore_vision/models/mobilenet_v2_0_5_224/requirements.txt +2 -0
  325. brainscore_vision/models/mobilenet_v2_0_5_224/test.py +9 -0
  326. brainscore_vision/models/mobilenet_v2_0_75_160/__init__.py +7 -0
  327. brainscore_vision/models/mobilenet_v2_0_75_160/model.py +74 -0
  328. brainscore_vision/models/mobilenet_v2_0_75_160/region_layer_map/mobilenet_v2_0_75_160.json +6 -0
  329. brainscore_vision/models/mobilenet_v2_0_75_160/requirements.txt +2 -0
  330. brainscore_vision/models/mobilenet_v2_0_75_160/test.py +8 -0
  331. brainscore_vision/models/mobilenet_v2_0_75_192/__init__.py +7 -0
  332. brainscore_vision/models/mobilenet_v2_0_75_192/model.py +72 -0
  333. brainscore_vision/models/mobilenet_v2_0_75_192/region_layer_map/mobilenet_v2_0_75_192.json +6 -0
  334. brainscore_vision/models/mobilenet_v2_0_75_192/requirements.txt +2 -0
  335. brainscore_vision/models/mobilenet_v2_0_75_192/test.py +9 -0
  336. brainscore_vision/models/mobilenet_v2_0_75_224/__init__.py +7 -0
  337. brainscore_vision/models/mobilenet_v2_0_75_224/model.py +73 -0
  338. brainscore_vision/models/mobilenet_v2_0_75_224/region_layer_map/mobilenet_v2_0_75_224.json +6 -0
  339. brainscore_vision/models/mobilenet_v2_0_75_224/requirements.txt +2 -0
  340. brainscore_vision/models/mobilenet_v2_0_75_224/test.py +8 -0
  341. brainscore_vision/models/mobilenet_v2_1_0_128/__init__.py +7 -0
  342. brainscore_vision/models/mobilenet_v2_1_0_128/model.py +73 -0
  343. brainscore_vision/models/mobilenet_v2_1_0_128/region_layer_map/mobilenet_v2_1_0_128.json +6 -0
  344. brainscore_vision/models/mobilenet_v2_1_0_128/requirements.txt +2 -0
  345. brainscore_vision/models/mobilenet_v2_1_0_128/test.py +8 -0
  346. brainscore_vision/models/mobilenet_v2_1_0_160/__init__.py +7 -0
  347. brainscore_vision/models/mobilenet_v2_1_0_160/model.py +73 -0
  348. brainscore_vision/models/mobilenet_v2_1_0_160/region_layer_map/mobilenet_v2_1_0_160.json +6 -0
  349. brainscore_vision/models/mobilenet_v2_1_0_160/requirements.txt +2 -0
  350. brainscore_vision/models/mobilenet_v2_1_0_160/test.py +8 -0
  351. brainscore_vision/models/mobilenet_v2_1_0_192/__init__.py +7 -0
  352. brainscore_vision/models/mobilenet_v2_1_0_192/model.py +73 -0
  353. brainscore_vision/models/mobilenet_v2_1_0_192/region_layer_map/mobilenet_v2_1_0_192.json +6 -0
  354. brainscore_vision/models/mobilenet_v2_1_0_192/requirements.txt +2 -0
  355. brainscore_vision/models/mobilenet_v2_1_0_192/test.py +8 -0
  356. brainscore_vision/models/{pnasnet_large_pytorch → mobilenet_v2_1_0_224}/__init__.py +3 -3
  357. brainscore_vision/models/mobilenet_v2_1_0_224/model.py +60 -0
  358. brainscore_vision/models/mobilenet_v2_1_0_224/region_layer_map/mobilenet_v2_1_0_224.json +6 -0
  359. brainscore_vision/models/mobilenet_v2_1_0_224/test.py +8 -0
  360. brainscore_vision/models/mobilenet_v2_1_3_224/__init__.py +7 -0
  361. brainscore_vision/models/mobilenet_v2_1_3_224/model.py +73 -0
  362. brainscore_vision/models/mobilenet_v2_1_3_224/region_layer_map/mobilenet_v2_1_3_224.json +6 -0
  363. brainscore_vision/models/mobilenet_v2_1_3_224/requirements.txt +2 -0
  364. brainscore_vision/models/mobilenet_v2_1_3_224/test.py +8 -0
  365. brainscore_vision/models/mobilenet_v2_1_4_224/__init__.py +7 -0
  366. brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_4_224}/model.py +3 -3
  367. brainscore_vision/models/mobilenet_v2_1_4_224/region_layer_map/mobilenet_v2_1_4_224.json +6 -0
  368. brainscore_vision/models/mobilenet_v2_1_4_224/requirements.txt +3 -0
  369. brainscore_vision/models/mobilenet_v2_1_4_224/test.py +8 -0
  370. brainscore_vision/models/nasnet_large/__init__.py +7 -0
  371. brainscore_vision/models/nasnet_large/model.py +60 -0
  372. brainscore_vision/models/nasnet_large/region_layer_map/nasnet_large.json +6 -0
  373. brainscore_vision/models/nasnet_large/test.py +8 -0
  374. brainscore_vision/models/nasnet_mobile/__init__.py +7 -0
  375. brainscore_vision/models/nasnet_mobile/model.py +685 -0
  376. brainscore_vision/models/nasnet_mobile/region_layer_map/nasnet_mobile.json +6 -0
  377. brainscore_vision/models/nasnet_mobile/requirements.txt +1 -0
  378. brainscore_vision/models/nasnet_mobile/test.py +8 -0
  379. brainscore_vision/models/omnivore_swinB/__init__.py +7 -0
  380. brainscore_vision/models/omnivore_swinB/model.py +79 -0
  381. brainscore_vision/models/omnivore_swinB/region_layer_map/omnivore_swinB.json +1 -0
  382. brainscore_vision/models/omnivore_swinB/requirements.txt +5 -0
  383. brainscore_vision/models/omnivore_swinB/test.py +9 -0
  384. brainscore_vision/models/omnivore_swinS/__init__.py +7 -0
  385. brainscore_vision/models/omnivore_swinS/model.py +79 -0
  386. brainscore_vision/models/omnivore_swinS/region_layer_map/omnivore_swinS.json +1 -0
  387. brainscore_vision/models/omnivore_swinS/requirements.txt +7 -0
  388. brainscore_vision/models/omnivore_swinS/test.py +9 -0
  389. brainscore_vision/models/pnasnet_large/__init__.py +7 -0
  390. brainscore_vision/models/{pnasnet_large_pytorch → pnasnet_large}/model.py +6 -10
  391. brainscore_vision/models/pnasnet_large/region_layer_map/pnasnet_large.json +6 -0
  392. brainscore_vision/models/pnasnet_large/requirements.txt +3 -0
  393. brainscore_vision/models/pnasnet_large/test.py +8 -0
  394. brainscore_vision/models/resnet50_SIN/__init__.py +7 -0
  395. brainscore_vision/models/resnet50_SIN/model.py +63 -0
  396. brainscore_vision/models/resnet50_SIN/region_layer_map/resnet50-SIN.json +6 -0
  397. brainscore_vision/models/resnet50_SIN/requirements.txt +1 -0
  398. brainscore_vision/models/resnet50_SIN/test.py +9 -0
  399. brainscore_vision/models/resnet50_SIN_IN/__init__.py +7 -0
  400. brainscore_vision/models/resnet50_SIN_IN/model.py +65 -0
  401. brainscore_vision/models/resnet50_SIN_IN/region_layer_map/resnet50-SIN_IN.json +6 -0
  402. brainscore_vision/models/resnet50_SIN_IN/requirements.txt +2 -0
  403. brainscore_vision/models/resnet50_SIN_IN/test.py +9 -0
  404. brainscore_vision/models/resnet50_SIN_IN_IN/__init__.py +7 -0
  405. brainscore_vision/models/resnet50_SIN_IN_IN/model.py +65 -0
  406. brainscore_vision/models/resnet50_SIN_IN_IN/region_layer_map/resnet50-SIN_IN_IN.json +6 -0
  407. brainscore_vision/models/resnet50_SIN_IN_IN/requirements.txt +2 -0
  408. brainscore_vision/models/resnet50_SIN_IN_IN/test.py +9 -0
  409. brainscore_vision/models/resnet50_VITO_8deg_cc/__init__.py +9 -0
  410. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py +1061 -0
  411. brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/spatialattn.py +50 -0
  412. brainscore_vision/models/resnet50_VITO_8deg_cc/model.py +72 -0
  413. brainscore_vision/models/resnet50_VITO_8deg_cc/region_layer_map/resnet50-VITO-8deg-cc.json +6 -0
  414. brainscore_vision/models/resnet50_VITO_8deg_cc/requirements.txt +3 -0
  415. brainscore_vision/models/resnet50_VITO_8deg_cc/test.py +8 -0
  416. brainscore_vision/models/resnet50_barlow/__init__.py +7 -0
  417. brainscore_vision/models/resnet50_barlow/model.py +53 -0
  418. brainscore_vision/models/resnet50_barlow/region_layer_map/resnet50-barlow.json +1 -0
  419. brainscore_vision/models/resnet50_barlow/requirements.txt +1 -0
  420. brainscore_vision/models/resnet50_barlow/test.py +9 -0
  421. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/__init__.py +6 -0
  422. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/model.py +128 -0
  423. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/region_layer_map/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234.json +1 -0
  424. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/requirements.txt +5 -0
  425. brainscore_vision/models/resnet50_finetune_cutmix_AVGe2e3_robust_linf8255_e0_247x234/test.py +7 -0
  426. brainscore_vision/models/resnet50_moclr8deg/__init__.py +11 -0
  427. brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py +496 -0
  428. brainscore_vision/models/resnet50_moclr8deg/model.py +45 -0
  429. brainscore_vision/models/resnet50_moclr8deg/region_layer_map/resnet50-moclr8deg.json +6 -0
  430. brainscore_vision/models/resnet50_moclr8deg/requirements.txt +3 -0
  431. brainscore_vision/models/resnet50_moclr8deg/test.py +8 -0
  432. brainscore_vision/models/resnet50_robust_l2_eps1/__init__.py +9 -0
  433. brainscore_vision/models/resnet50_robust_l2_eps1/model.py +72 -0
  434. brainscore_vision/models/resnet50_robust_l2_eps1/region_layer_map/resnet50_robust_l2_eps1.json +1 -0
  435. brainscore_vision/models/resnet50_robust_l2_eps1/requirements.txt +2 -0
  436. brainscore_vision/models/resnet50_robust_l2_eps1/test.py +8 -0
  437. brainscore_vision/models/resnet50_robust_l2_eps3/__init__.py +8 -0
  438. brainscore_vision/models/resnet50_robust_l2_eps3/model.py +72 -0
  439. brainscore_vision/models/resnet50_robust_l2_eps3/region_layer_map/resnet50_robust_l2_eps3.json +1 -0
  440. brainscore_vision/models/resnet50_robust_l2_eps3/requirements.txt +2 -0
  441. brainscore_vision/models/resnet50_robust_l2_eps3/test.py +8 -0
  442. brainscore_vision/models/resnet50_sup/__init__.py +5 -0
  443. brainscore_vision/models/resnet50_sup/model.py +55 -0
  444. brainscore_vision/models/resnet50_sup/region_layer_map/resnet50-sup.json +1 -0
  445. brainscore_vision/models/resnet50_sup/requirements.txt +1 -0
  446. brainscore_vision/models/resnet50_sup/test.py +8 -0
  447. brainscore_vision/models/resnet50_vicreg/__init__.py +7 -0
  448. brainscore_vision/models/resnet50_vicreg/model.py +62 -0
  449. brainscore_vision/models/resnet50_vicreg/region_layer_map/resnet50-vicreg.json +1 -0
  450. brainscore_vision/models/resnet50_vicreg/requirements.txt +1 -0
  451. brainscore_vision/models/resnet50_vicreg/test.py +9 -0
  452. brainscore_vision/models/resnet50_vicregl0p75/__init__.py +5 -0
  453. brainscore_vision/models/resnet50_vicregl0p75/model.py +80 -0
  454. brainscore_vision/models/resnet50_vicregl0p75/region_layer_map/resnet50-vicregl0p75.json +1 -0
  455. brainscore_vision/models/resnet50_vicregl0p75/test.py +9 -0
  456. brainscore_vision/models/resnet50_vicregl0p9/__init__.py +5 -0
  457. brainscore_vision/models/resnet50_vicregl0p9/model.py +85 -0
  458. brainscore_vision/models/resnet50_vicregl0p9/region_layer_map/resnet50-vicregl0p9.json +1 -0
  459. brainscore_vision/models/resnet50_vicregl0p9/requirements.txt +3 -0
  460. brainscore_vision/models/resnet50_vicregl0p9/test.py +9 -0
  461. brainscore_vision/models/resnet50_vitoimagevidnet8/__init__.py +11 -0
  462. brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py +496 -0
  463. brainscore_vision/models/resnet50_vitoimagevidnet8/model.py +45 -0
  464. brainscore_vision/models/resnet50_vitoimagevidnet8/region_layer_map/resnet50-vitoimagevidnet8.json +6 -0
  465. brainscore_vision/models/resnet50_vitoimagevidnet8/requirements.txt +3 -0
  466. brainscore_vision/models/resnet50_vitoimagevidnet8/test.py +8 -0
  467. brainscore_vision/models/resnet_101_v1/__init__.py +5 -0
  468. brainscore_vision/models/resnet_101_v1/model.py +42 -0
  469. brainscore_vision/models/resnet_101_v1/region_layer_map/resnet_101_v1.json +6 -0
  470. brainscore_vision/models/resnet_101_v1/requirements.txt +1 -0
  471. brainscore_vision/models/resnet_101_v1/test.py +8 -0
  472. brainscore_vision/models/resnet_101_v2/__init__.py +8 -0
  473. brainscore_vision/models/resnet_101_v2/model.py +33 -0
  474. brainscore_vision/models/resnet_101_v2/region_layer_map/resnet_101_v2.json +6 -0
  475. brainscore_vision/models/resnet_101_v2/requirements.txt +2 -0
  476. brainscore_vision/models/resnet_101_v2/test.py +8 -0
  477. brainscore_vision/models/resnet_152_v1/__init__.py +5 -0
  478. brainscore_vision/models/resnet_152_v1/model.py +42 -0
  479. brainscore_vision/models/resnet_152_v1/region_layer_map/resnet_152_v1.json +6 -0
  480. brainscore_vision/models/resnet_152_v1/requirements.txt +1 -0
  481. brainscore_vision/models/resnet_152_v1/test.py +8 -0
  482. brainscore_vision/models/resnet_152_v2/__init__.py +7 -0
  483. brainscore_vision/models/{resnet_152_v2_pytorch → resnet_152_v2}/model.py +9 -11
  484. brainscore_vision/models/resnet_152_v2/region_layer_map/resnet_152_v2.json +6 -0
  485. brainscore_vision/models/resnet_152_v2/requirements.txt +2 -0
  486. brainscore_vision/models/resnet_152_v2/test.py +8 -0
  487. brainscore_vision/models/resnet_18_test_m/__init__.py +9 -0
  488. brainscore_vision/models/resnet_18_test_m/helpers/resnet.py +586 -0
  489. brainscore_vision/models/resnet_18_test_m/model.py +80 -0
  490. brainscore_vision/models/resnet_18_test_m/region_layer_map/resnet-18_test_m.json +1 -0
  491. brainscore_vision/models/resnet_18_test_m/requirements.txt +2 -0
  492. brainscore_vision/models/resnet_18_test_m/test.py +8 -0
  493. brainscore_vision/models/resnet_50_2/__init__.py +9 -0
  494. brainscore_vision/models/resnet_50_2/evnet/backends.py +109 -0
  495. brainscore_vision/models/resnet_50_2/evnet/evnet.py +147 -0
  496. brainscore_vision/models/resnet_50_2/evnet/modules.py +308 -0
  497. brainscore_vision/models/resnet_50_2/evnet/params.py +326 -0
  498. brainscore_vision/models/resnet_50_2/evnet/utils.py +142 -0
  499. brainscore_vision/models/resnet_50_2/model.py +46 -0
  500. brainscore_vision/models/resnet_50_2/region_layer_map/resnet_50_2.json +6 -0
  501. brainscore_vision/models/resnet_50_2/requirements.txt +4 -0
  502. brainscore_vision/models/resnet_50_2/test.py +8 -0
  503. brainscore_vision/models/resnet_50_robust/model.py +2 -2
  504. brainscore_vision/models/resnet_50_robust/region_layer_map/resnet-50-robust.json +1 -0
  505. brainscore_vision/models/resnet_50_v1/__init__.py +5 -0
  506. brainscore_vision/models/resnet_50_v1/model.py +42 -0
  507. brainscore_vision/models/resnet_50_v1/region_layer_map/resnet_50_v1.json +6 -0
  508. brainscore_vision/models/resnet_50_v1/requirements.txt +1 -0
  509. brainscore_vision/models/resnet_50_v1/test.py +8 -0
  510. brainscore_vision/models/resnet_50_v2/__init__.py +8 -0
  511. brainscore_vision/models/resnet_50_v2/model.py +33 -0
  512. brainscore_vision/models/resnet_50_v2/region_layer_map/resnet_50_v2.json +6 -0
  513. brainscore_vision/models/resnet_50_v2/requirements.txt +2 -0
  514. brainscore_vision/models/resnet_50_v2/test.py +8 -0
  515. brainscore_vision/models/resnet_SIN_IN_FT_IN/__init__.py +5 -0
  516. brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py +79 -0
  517. brainscore_vision/models/resnet_SIN_IN_FT_IN/region_layer_map/resnet_SIN_IN_FT_IN.json +1 -0
  518. brainscore_vision/models/resnet_SIN_IN_FT_IN/requirements.txt +2 -0
  519. brainscore_vision/models/resnet_SIN_IN_FT_IN/test.py +8 -0
  520. brainscore_vision/models/sBarlow_lmda_0/__init__.py +9 -0
  521. brainscore_vision/models/sBarlow_lmda_0/model.py +64 -0
  522. brainscore_vision/models/sBarlow_lmda_0/region_layer_map/sBarlow_lmda_0.json +6 -0
  523. brainscore_vision/models/sBarlow_lmda_0/setup.py +25 -0
  524. brainscore_vision/models/sBarlow_lmda_0/test.py +1 -0
  525. brainscore_vision/models/sBarlow_lmda_01/__init__.py +9 -0
  526. brainscore_vision/models/sBarlow_lmda_01/model.py +64 -0
  527. brainscore_vision/models/sBarlow_lmda_01/region_layer_map/sBarlow_lmda_01.json +6 -0
  528. brainscore_vision/models/sBarlow_lmda_01/setup.py +25 -0
  529. brainscore_vision/models/sBarlow_lmda_01/test.py +1 -0
  530. brainscore_vision/models/sBarlow_lmda_1/__init__.py +9 -0
  531. brainscore_vision/models/sBarlow_lmda_1/model.py +64 -0
  532. brainscore_vision/models/sBarlow_lmda_1/region_layer_map/sBarlow_lmda_1.json +6 -0
  533. brainscore_vision/models/sBarlow_lmda_1/setup.py +25 -0
  534. brainscore_vision/models/sBarlow_lmda_1/test.py +1 -0
  535. brainscore_vision/models/sBarlow_lmda_2/__init__.py +9 -0
  536. brainscore_vision/models/sBarlow_lmda_2/model.py +64 -0
  537. brainscore_vision/models/sBarlow_lmda_2/region_layer_map/sBarlow_lmda_2.json +6 -0
  538. brainscore_vision/models/sBarlow_lmda_2/setup.py +25 -0
  539. brainscore_vision/models/sBarlow_lmda_2/test.py +1 -0
  540. brainscore_vision/models/sBarlow_lmda_8/__init__.py +9 -0
  541. brainscore_vision/models/sBarlow_lmda_8/model.py +64 -0
  542. brainscore_vision/models/sBarlow_lmda_8/region_layer_map/sBarlow_lmda_8.json +6 -0
  543. brainscore_vision/models/sBarlow_lmda_8/setup.py +25 -0
  544. brainscore_vision/models/sBarlow_lmda_8/test.py +1 -0
  545. brainscore_vision/models/scsBarlow_lmda_1/__init__.py +9 -0
  546. brainscore_vision/models/scsBarlow_lmda_1/model.py +64 -0
  547. brainscore_vision/models/scsBarlow_lmda_1/region_layer_map/scsBarlow_lmda_1.json +6 -0
  548. brainscore_vision/models/scsBarlow_lmda_1/setup.py +25 -0
  549. brainscore_vision/models/scsBarlow_lmda_1/test.py +1 -0
  550. brainscore_vision/models/scsBarlow_lmda_2/__init__.py +9 -0
  551. brainscore_vision/models/scsBarlow_lmda_2/model.py +64 -0
  552. brainscore_vision/models/scsBarlow_lmda_2/region_layer_map/scsBarlow_lmda_2.json +6 -0
  553. brainscore_vision/models/scsBarlow_lmda_2/setup.py +25 -0
  554. brainscore_vision/models/scsBarlow_lmda_2/test.py +1 -0
  555. brainscore_vision/models/scsBarlow_lmda_4/__init__.py +9 -0
  556. brainscore_vision/models/scsBarlow_lmda_4/model.py +64 -0
  557. brainscore_vision/models/scsBarlow_lmda_4/region_layer_map/scsBarlow_lmda_4.json +6 -0
  558. brainscore_vision/models/scsBarlow_lmda_4/setup.py +25 -0
  559. brainscore_vision/models/scsBarlow_lmda_4/test.py +1 -0
  560. brainscore_vision/models/shufflenet_v2_x1_0/__init__.py +7 -0
  561. brainscore_vision/models/shufflenet_v2_x1_0/model.py +52 -0
  562. brainscore_vision/models/shufflenet_v2_x1_0/region_layer_map/shufflenet_v2_x1_0.json +1 -0
  563. brainscore_vision/models/shufflenet_v2_x1_0/requirements.txt +2 -0
  564. brainscore_vision/models/shufflenet_v2_x1_0/test.py +9 -0
  565. brainscore_vision/models/timm_models/__init__.py +193 -0
  566. brainscore_vision/models/timm_models/model.py +90 -0
  567. brainscore_vision/models/timm_models/model_configs.json +464 -0
  568. brainscore_vision/models/timm_models/requirements.txt +3 -0
  569. brainscore_vision/models/timm_models/test.py +0 -0
  570. brainscore_vision/models/vgg_16/__init__.py +7 -0
  571. brainscore_vision/models/vgg_16/model.py +52 -0
  572. brainscore_vision/models/vgg_16/region_layer_map/vgg_16.json +6 -0
  573. brainscore_vision/models/vgg_16/requirements.txt +1 -0
  574. brainscore_vision/models/vgg_16/test.py +8 -0
  575. brainscore_vision/models/vgg_19/__init__.py +7 -0
  576. brainscore_vision/models/vgg_19/model.py +52 -0
  577. brainscore_vision/models/vgg_19/region_layer_map/vgg_19.json +1 -0
  578. brainscore_vision/models/vgg_19/requirements.txt +1 -0
  579. brainscore_vision/models/vgg_19/test.py +8 -0
  580. brainscore_vision/models/vonegrcnn_47e/__init__.py +5 -0
  581. brainscore_vision/models/vonegrcnn_47e/model.py +622 -0
  582. brainscore_vision/models/vonegrcnn_47e/region_layer_map/vonegrcnn_47e.json +6 -0
  583. brainscore_vision/models/vonegrcnn_47e/requirements.txt +0 -0
  584. brainscore_vision/models/vonegrcnn_47e/test.py +8 -0
  585. brainscore_vision/models/vonegrcnn_52e_full/__init__.py +5 -0
  586. brainscore_vision/models/vonegrcnn_52e_full/model.py +623 -0
  587. brainscore_vision/models/vonegrcnn_52e_full/region_layer_map/vonegrcnn_52e_full.json +6 -0
  588. brainscore_vision/models/vonegrcnn_52e_full/requirements.txt +4 -0
  589. brainscore_vision/models/vonegrcnn_52e_full/test.py +8 -0
  590. brainscore_vision/models/vonegrcnn_62e_nobn/__init__.py +7 -0
  591. brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py +544 -0
  592. brainscore_vision/models/vonegrcnn_62e_nobn/model.py +122 -0
  593. brainscore_vision/models/vonegrcnn_62e_nobn/region_layer_map/vonegrcnn_62e_nobn.json +6 -0
  594. brainscore_vision/models/vonegrcnn_62e_nobn/requirements.txt +3 -0
  595. brainscore_vision/models/vonegrcnn_62e_nobn/test.py +8 -0
  596. brainscore_vision/models/voneresnet_50/__init__.py +7 -0
  597. brainscore_vision/models/voneresnet_50/model.py +37 -0
  598. brainscore_vision/models/voneresnet_50/region_layer_map/voneresnet-50.json +6 -0
  599. brainscore_vision/models/voneresnet_50/requirements.txt +1 -0
  600. brainscore_vision/models/voneresnet_50/test.py +8 -0
  601. brainscore_vision/models/voneresnet_50_1/__init__.py +11 -0
  602. brainscore_vision/models/voneresnet_50_1/evnet/backends.py +109 -0
  603. brainscore_vision/models/voneresnet_50_1/evnet/evnet.py +147 -0
  604. brainscore_vision/models/voneresnet_50_1/evnet/modules.py +308 -0
  605. brainscore_vision/models/voneresnet_50_1/evnet/params.py +326 -0
  606. brainscore_vision/models/voneresnet_50_1/evnet/utils.py +142 -0
  607. brainscore_vision/models/voneresnet_50_1/model.py +68 -0
  608. brainscore_vision/models/voneresnet_50_1/requirements.txt +5 -0
  609. brainscore_vision/models/voneresnet_50_1/test.py +7 -0
  610. brainscore_vision/models/voneresnet_50_3/__init__.py +11 -0
  611. brainscore_vision/models/voneresnet_50_3/evnet/backends.py +109 -0
  612. brainscore_vision/models/voneresnet_50_3/evnet/evnet.py +147 -0
  613. brainscore_vision/models/voneresnet_50_3/evnet/modules.py +308 -0
  614. brainscore_vision/models/voneresnet_50_3/evnet/params.py +326 -0
  615. brainscore_vision/models/voneresnet_50_3/evnet/utils.py +142 -0
  616. brainscore_vision/models/voneresnet_50_3/model.py +66 -0
  617. brainscore_vision/models/voneresnet_50_3/requirements.txt +4 -0
  618. brainscore_vision/models/voneresnet_50_3/test.py +7 -0
  619. brainscore_vision/models/voneresnet_50_no_weight/__init__.py +11 -0
  620. brainscore_vision/models/voneresnet_50_no_weight/evnet/backends.py +109 -0
  621. brainscore_vision/models/voneresnet_50_no_weight/evnet/evnet.py +147 -0
  622. brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +308 -0
  623. brainscore_vision/models/voneresnet_50_no_weight/evnet/params.py +326 -0
  624. brainscore_vision/models/voneresnet_50_no_weight/evnet/utils.py +142 -0
  625. brainscore_vision/models/voneresnet_50_no_weight/model.py +56 -0
  626. brainscore_vision/models/voneresnet_50_no_weight/requirements.txt +4 -0
  627. brainscore_vision/models/voneresnet_50_no_weight/test.py +7 -0
  628. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +2 -2
  629. brainscore_vision/models/voneresnet_50_robust/__init__.py +7 -0
  630. brainscore_vision/models/voneresnet_50_robust/model.py +50 -0
  631. brainscore_vision/models/voneresnet_50_robust/region_layer_map/voneresnet-50-robust.json +6 -0
  632. brainscore_vision/models/voneresnet_50_robust/requirements.txt +1 -0
  633. brainscore_vision/models/voneresnet_50_robust/test.py +8 -0
  634. brainscore_vision/models/xception/__init__.py +7 -0
  635. brainscore_vision/models/xception/model.py +64 -0
  636. brainscore_vision/models/xception/region_layer_map/xception.json +6 -0
  637. brainscore_vision/models/xception/requirements.txt +2 -0
  638. brainscore_vision/models/xception/test.py +8 -0
  639. brainscore_vision/models/yudixie_resnet50_250117_0/__init__.py +11 -0
  640. brainscore_vision/models/yudixie_resnet50_250117_0/model.py +60 -0
  641. brainscore_vision/models/yudixie_resnet50_250117_0/region_layer_map/yudixie_resnet50_distance_reg_0_240908.json +6 -0
  642. brainscore_vision/models/yudixie_resnet50_250117_0/setup.py +25 -0
  643. brainscore_vision/models/yudixie_resnet50_250117_0/test.py +1 -0
  644. brainscore_vision/models/yudixie_resnet50_250117_1/__init__.py +11 -0
  645. brainscore_vision/models/yudixie_resnet50_250117_1/model.py +60 -0
  646. brainscore_vision/models/yudixie_resnet50_250117_1/region_layer_map/yudixie_resnet50_translation_reg_0_240908.json +6 -0
  647. brainscore_vision/models/yudixie_resnet50_250117_1/setup.py +25 -0
  648. brainscore_vision/models/yudixie_resnet50_250117_1/test.py +1 -0
  649. brainscore_vision/models/yudixie_resnet50_250117_10/__init__.py +11 -0
  650. brainscore_vision/models/yudixie_resnet50_250117_10/model.py +60 -0
  651. brainscore_vision/models/yudixie_resnet50_250117_10/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240908.json +6 -0
  652. brainscore_vision/models/yudixie_resnet50_250117_10/setup.py +25 -0
  653. brainscore_vision/models/yudixie_resnet50_250117_10/test.py +1 -0
  654. brainscore_vision/models/yudixie_resnet50_250117_11/__init__.py +11 -0
  655. brainscore_vision/models/yudixie_resnet50_250117_11/model.py +60 -0
  656. brainscore_vision/models/yudixie_resnet50_250117_11/region_layer_map/yudixie_resnet50_random_0_240908.json +6 -0
  657. brainscore_vision/models/yudixie_resnet50_250117_11/setup.py +25 -0
  658. brainscore_vision/models/yudixie_resnet50_250117_11/test.py +1 -0
  659. brainscore_vision/models/yudixie_resnet50_250117_2/__init__.py +11 -0
  660. brainscore_vision/models/yudixie_resnet50_250117_2/model.py +60 -0
  661. brainscore_vision/models/yudixie_resnet50_250117_2/region_layer_map/yudixie_resnet50_rotation_reg_0_240908.json +6 -0
  662. brainscore_vision/models/yudixie_resnet50_250117_2/setup.py +25 -0
  663. brainscore_vision/models/yudixie_resnet50_250117_2/test.py +1 -0
  664. brainscore_vision/models/yudixie_resnet50_250117_3/__init__.py +11 -0
  665. brainscore_vision/models/yudixie_resnet50_250117_3/model.py +60 -0
  666. brainscore_vision/models/yudixie_resnet50_250117_3/region_layer_map/yudixie_resnet50_distance_translation_0_240908.json +6 -0
  667. brainscore_vision/models/yudixie_resnet50_250117_3/setup.py +25 -0
  668. brainscore_vision/models/yudixie_resnet50_250117_3/test.py +1 -0
  669. brainscore_vision/models/yudixie_resnet50_250117_4/__init__.py +11 -0
  670. brainscore_vision/models/yudixie_resnet50_250117_4/model.py +60 -0
  671. brainscore_vision/models/yudixie_resnet50_250117_4/region_layer_map/yudixie_resnet50_distance_rotation_0_240908.json +6 -0
  672. brainscore_vision/models/yudixie_resnet50_250117_4/setup.py +25 -0
  673. brainscore_vision/models/yudixie_resnet50_250117_4/test.py +1 -0
  674. brainscore_vision/models/yudixie_resnet50_250117_5/__init__.py +11 -0
  675. brainscore_vision/models/yudixie_resnet50_250117_5/model.py +60 -0
  676. brainscore_vision/models/yudixie_resnet50_250117_5/region_layer_map/yudixie_resnet50_translation_rotation_0_240908.json +6 -0
  677. brainscore_vision/models/yudixie_resnet50_250117_5/setup.py +25 -0
  678. brainscore_vision/models/yudixie_resnet50_250117_5/test.py +1 -0
  679. brainscore_vision/models/yudixie_resnet50_250117_6/__init__.py +11 -0
  680. brainscore_vision/models/yudixie_resnet50_250117_6/model.py +60 -0
  681. brainscore_vision/models/yudixie_resnet50_250117_6/region_layer_map/yudixie_resnet50_distance_translation_rotation_0_240908.json +6 -0
  682. brainscore_vision/models/yudixie_resnet50_250117_6/setup.py +25 -0
  683. brainscore_vision/models/yudixie_resnet50_250117_6/test.py +1 -0
  684. brainscore_vision/models/yudixie_resnet50_250117_7/__init__.py +11 -0
  685. brainscore_vision/models/yudixie_resnet50_250117_7/model.py +60 -0
  686. brainscore_vision/models/yudixie_resnet50_250117_7/region_layer_map/yudixie_resnet50_category_class_0_240908.json +6 -0
  687. brainscore_vision/models/yudixie_resnet50_250117_7/setup.py +25 -0
  688. brainscore_vision/models/yudixie_resnet50_250117_7/test.py +1 -0
  689. brainscore_vision/models/yudixie_resnet50_250117_8/__init__.py +11 -0
  690. brainscore_vision/models/yudixie_resnet50_250117_8/model.py +60 -0
  691. brainscore_vision/models/yudixie_resnet50_250117_8/region_layer_map/yudixie_resnet50_object_class_0_240908.json +6 -0
  692. brainscore_vision/models/yudixie_resnet50_250117_8/setup.py +25 -0
  693. brainscore_vision/models/yudixie_resnet50_250117_8/test.py +1 -0
  694. brainscore_vision/models/yudixie_resnet50_250117_9/__init__.py +11 -0
  695. brainscore_vision/models/yudixie_resnet50_250117_9/model.py +60 -0
  696. brainscore_vision/models/yudixie_resnet50_250117_9/region_layer_map/yudixie_resnet50_cat_obj_class_all_latents_0_240908.json +6 -0
  697. brainscore_vision/models/yudixie_resnet50_250117_9/setup.py +25 -0
  698. brainscore_vision/models/yudixie_resnet50_250117_9/test.py +1 -0
  699. brainscore_vision/submission/actions_helpers.py +2 -3
  700. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/METADATA +6 -6
  701. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/RECORD +714 -130
  702. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/WHEEL +1 -1
  703. docs/source/index.rst +1 -0
  704. docs/source/modules/submission.rst +1 -1
  705. docs/source/modules/version_bumping.rst +43 -0
  706. tests/test_submission/test_actions_helpers.py +2 -6
  707. brainscore_vision/models/densenet_201_pytorch/test.py +0 -8
  708. brainscore_vision/models/inception_v3_pytorch/test.py +0 -8
  709. brainscore_vision/models/inception_v4_pytorch/test.py +0 -8
  710. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +0 -7
  711. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +0 -8
  712. brainscore_vision/models/pnasnet_large_pytorch/test.py +0 -8
  713. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +0 -7
  714. brainscore_vision/models/resnet_152_v2_pytorch/test.py +0 -8
  715. /brainscore_vision/models/{densenet_201_pytorch → densenet_201}/requirements.txt +0 -0
  716. /brainscore_vision/models/{inception_v3_pytorch → inception_v3}/requirements.txt +0 -0
  717. /brainscore_vision/models/{inception_v4_pytorch → inception_v4}/requirements.txt +0 -0
  718. /brainscore_vision/models/{mobilenet_v2_1_4_224_pytorch → mobilenet_v2_1_0_224}/requirements.txt +0 -0
  719. /brainscore_vision/models/{pnasnet_large_pytorch → nasnet_large}/requirements.txt +0 -0
  720. /brainscore_vision/models/{resnet_152_v2_pytorch → resnet50_vicregl0p75}/requirements.txt +0 -0
  721. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/LICENSE +0 -0
  722. {brainscore_vision-2.2.3.dist-info → brainscore_vision-2.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,326 @@
+ import torch
+ import numpy as np
+ import scipy.stats as stats
+ from .utils import sample_dist
+ from typing import Literal
+
+ image_size = 224
+ visual_degrees = 7
+ kernel_size = {'p': 21, 'm': 65}  # 95% of Gaussian coverage
+
+
+ # Receptive fields of P and M ganglion cells across the primate retina (Croner and Kaplan, 1995)
+ # https://www.sciencedirect.com/science/article/pii/0042698994E0066T
+
+ # P cells (eccentricity range of 0-5 deg)
+ P_cell_params = {
+     'med_rc': 0.03, 'iqr_rc': 0.01,    # Center radius
+     'med_kc': 325.2, 'iqr_kc': 302,    # Center peak sensitivity
+     'med_rs': 0.18, 'iqr_rs': 0.07,    # Surround radius
+     'med_ks': 4.4, 'iqr_ks': 4.6,      # Surround peak sensitivity
+     'c_kc': 0.391, 'm_kc': -1.850,     # Center peak sensitivity vs. radius regression
+     'c_ks': 0.128, 'm_ks': -2.147,     # Surround peak sensitivity vs. radius regression
+ }
+
+ # M cells (eccentricity range of 0-10 deg)
+ M_cell_params = {
+     'med_rc': 0.10, 'iqr_rc': 0.02,    # Center radius
+     'med_kc': 148.0, 'iqr_kc': 122.4,  # Center peak sensitivity
+     'med_rs': 0.72, 'iqr_rs': 0.23,    # Surround radius
+     'med_ks': 1.1, 'iqr_ks': 0.8,      # Surround peak sensitivity
+ }
+
+
+ def get_dog_params(
+         features: int, sampling: Literal['median', 'binning', 'uniform', 'lognormal'] = 'median',
+         colors: list[Literal['r/g', 'g/r', 'b/y', 'w/b']] = ['r/g', 'g/r', 'b/y'],
+         polarity: list[Literal[0, 1]] = None,
+         cell_type: Literal['p', 'm'] = 'p', image_size: int = image_size, visual_degrees: int = visual_degrees
+ ) -> dict:
+     """Generates DoG parameters for RetinaBlock with more than 3 channels.
+     Channels are split evenly across the requested color opponencies (e.g. R/G, G/R, B/Y).
+     Only generates ON-center cells.
+
+     Args:
+         features (int): total number of DoG channels; must be a multiple of len(colors).
+         sampling (str, optional): how center/surround radii are drawn: 'median' (deterministic medians),
+             'binning' (deterministic bin centers), 'uniform' or 'lognormal' (stochastic). Defaults to 'median'.
+         colors (list, optional): color opponencies to generate. Defaults to ['r/g', 'g/r', 'b/y'].
+         polarity (list, optional): per-channel multipliers applied to the opponency tensor. Defaults to None.
+         cell_type (str, optional): ganglion cell type, 'p' or 'm'. Defaults to 'p'.
+         image_size (int, optional): model image size. Defaults to image_size.
+         visual_degrees (int, optional): visual degrees of the model FoV. Defaults to visual_degrees.
+
+     Returns:
+         dict: dictionary with center and surround radii, opponency tensor and DoG kernel size
+     """
+
+     if not features:
+         return {
+             f'rc_{cell_type}_cell': torch.tensor([]),
+             f'rs_{cell_type}_cell': torch.tensor([]),
+             f'opponency_{cell_type}_cell': torch.tensor([]),
+             f'kernel_{cell_type}_cell': torch.tensor([])
+         }
+
+     assert cell_type in ['p', 'm']
+
+     cell_params = M_cell_params if cell_type == 'm' else P_cell_params
+     min_rc = cell_params['med_rc'] - cell_params['iqr_rc']
+     max_rc = cell_params['med_rc'] + cell_params['iqr_rc']
+     min_rs = cell_params['med_rs'] - cell_params['iqr_rs']
+     max_rs = cell_params['med_rs'] + cell_params['iqr_rs']
+
+     color_mapping = {
+         'r/g': np.array([[1, 0, 0], [0, -1, 0]], dtype=np.float16),     # R+/G- (center/surround)
+         'g/r': np.array([[0, 1, 0], [-1, 0, 0]], dtype=np.float16),     # G+/R-
+         'b/y': np.array([[0, 0, 1], [-.5, -.5, 0]], dtype=np.float16),  # B+/Y-
+         'w/b': np.array([[1/3]*3, [-1/3]*3], dtype=np.float16)          # ON/OFF
+     }
+
+     assert features % len(colors) == 0
+
+     if sampling == 'median':
+         # Use median values from distributions (deterministic)
+         assert features == len(colors)
+         rc = np.ones((features,), dtype=np.float16) * cell_params['med_rc']
+         rs = np.ones((features,), dtype=np.float16) * cell_params['med_rs']
+         kc = np.ones((features,), dtype=np.float16) * cell_params['med_kc']
+         ks = np.ones((features,), dtype=np.float16) * cell_params['med_ks']
+     elif sampling == 'binning':
+         # Assume uniform joint distribution of rc and rs with discrete binning (deterministic)
+         assert int(np.sqrt(features // len(colors))) == np.sqrt(features // len(colors))
+         edges_rc = np.linspace(min_rc, max_rc, int(np.sqrt(features // len(colors))) + 1)
+         edges_rs = np.linspace(min_rs, max_rs, int(np.sqrt(features // len(colors))) + 1)
+         centers_rc = (edges_rc[:-1] + edges_rc[1:]) / 2
+         centers_rs = (edges_rs[:-1] + edges_rs[1:]) / 2
+         rc = np.repeat(centers_rc, int(np.sqrt(features // len(colors))))
+         rs = np.tile(centers_rs, int(np.sqrt(features // len(colors))))
+     elif sampling == 'uniform':
+         # Assume uniform disjoint distribution of rc and rs without binning (stochastic)
+         rc = np.random.uniform(min_rc, max_rc, features // len(colors))
+         rs = np.random.uniform(min_rs, max_rs, features // len(colors))
+     elif sampling == 'lognormal':
+         # Assume lognormal disjoint distribution of rc and rs (stochastic)
+         std_rc = (np.log(cell_params['med_rc'] - (cell_params['iqr_rc'] / 2)) - np.log(cell_params['med_rc'])) / stats.norm.ppf(.25)
+         std_rs = (np.log(cell_params['med_rs'] - (cell_params['iqr_rs'] / 2)) - np.log(cell_params['med_rs'])) / stats.norm.ppf(.25)
+         rc = np.random.lognormal(np.log(cell_params['med_rc']), std_rc, features // len(colors))
+         rs = np.random.lognormal(np.log(cell_params['med_rs']), std_rs, features // len(colors))
+
+     if sampling != 'median':
+         assert cell_type == 'p'
+         rc = np.tile(rc, len(colors))
+         rs = np.tile(rs, len(colors))
+         kc = cell_params['c_kc'] * rc ** cell_params['m_kc']
+         ks = cell_params['c_ks'] * rs ** cell_params['m_ks']
+
+     opponency = np.concatenate([
+         np.repeat(color_mapping[c][None, ...], features // len(colors), axis=0)
+         for c in colors
+     ])
+
+     # Conversions
+     ppd = image_size / visual_degrees  # pixels per FOV degree
+     rc = torch.from_numpy(rc * ppd)
+     rs = torch.from_numpy(rs * ppd)
+     kc = torch.from_numpy(kc / ppd ** 2)
+     ks = torch.from_numpy(ks / ppd ** 2)
+     opponency = torch.from_numpy(opponency)
+
+     opponency[:, 1] *= torch.unsqueeze(ks[:] / kc[:], 1)
+
+     if polarity:
+         assert len(polarity) == opponency.size(0)
+         opponency *= torch.tensor(polarity)[..., None, None]
+
+     params = {
+         f'rc_{cell_type}_cell': rc,
+         f'rs_{cell_type}_cell': rs,
+         f'opponency_{cell_type}_cell': opponency,
+         f'kernel_{cell_type}_cell': kernel_size[cell_type]
+     }
+
+     return params
+
+
+ def get_div_norm_params(
+         relative_size_la, kernel_la=None,
+         image_size=image_size, visual_degrees=visual_degrees
+ ) -> dict:
+
+     # Conversions
+     ppd = image_size / visual_degrees  # pixels per FOV degree
+     radius_la = P_cell_params['med_rs'] * relative_size_la * ppd
+     radius_cn = 2 * P_cell_params['med_rc'] * ppd
+     c50 = .3
+
+     if not kernel_la and radius_la < np.inf:
+         # Smallest odd kernel size covering the local-adaptation radius
+         kernel_la = int(radius_la * 2) + int(int(radius_la * 2) % 2 == 0)
+
+     params = {
+         'kernel_la': kernel_la,
+         'radius_la': radius_la,
+         'kernel_cn': kernel_size['p'],
+         'radius_cn': radius_cn,
+         'c50': c50
+     }
+
+     return params
+
+
+ def get_grating_params(
+         sf, angle=0, phase=0, contrast=1, radius=.5,
+         image_size=image_size, visual_degrees=visual_degrees
+ ) -> dict:
+     ppd = image_size / visual_degrees  # pixels per FOV degree
+     params = {
+         'size': image_size,
+         'radius': radius * ppd,
+         'sf': sf / ppd,
+         'theta': angle,
+         'phase': phase,
+         'contrast': contrast
+     }
+     return params
+
+
+ def generate_gabor_param(
+         n_sc, n_cc, seed=0, rand_flag=False, sf_corr=0.75,
+         sf_max=11.5, sf_min=0, diff_n=False, dnstd=0.22,
+         # Additional parameters
+         in_channels=3, set_orientation=None
+ ):
+
+     features = n_sc + n_cc
+
+     # Generates random sample
+     np.random.seed(seed)
+
+     phase_bins = np.array([0, 360])
+     phase_dist = np.array([1])
+
+     if rand_flag:
+         print('Uniform gabor parameters')
+         ori_bins = np.array([0, 180])
+         ori_dist = np.array([1])
+
+         nx_bins = np.array([0.1, 10**0])
+         nx_dist = np.array([1])
+
+         ny_bins = np.array([0.1, 10**0])
+         ny_dist = np.array([1])
+
+         sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8, 11.2])
+         sf_s_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+         sf_c_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+
+     else:
+         print('Neuronal distributions gabor parameters')
+         # DeValois 1982a
+         ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
+         ori_dist = np.array([66, 49, 77, 54])
+         ori_dist = ori_dist / ori_dist.sum()
+
+         # Ringach 2002b
+         nx_bins = np.logspace(-1, 0., 5, base=10)
+         ny_bins = np.logspace(-1, 0., 5, base=10)
+         n_joint_dist = np.array([[2., 0., 1., 0.],
+                                  [8., 9., 4., 1.],
+                                  [1., 2., 19., 17.],
+                                  [0., 0., 1., 7.]])
+         n_joint_dist = n_joint_dist / n_joint_dist.sum()
+         nx_dist = n_joint_dist.sum(axis=1)
+         nx_dist = nx_dist / nx_dist.sum()
+         ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)
+
+         # DeValois 1982b
+         sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8, 11.2])
+         # foveal only
+         sf_s_dist = np.array([4, 4, 8, 25, 33, 26, 28, 12, 8])
+         sf_c_dist = np.array([0, 0, 9, 9, 7, 10, 23, 12, 14])
+
+     phase = sample_dist(phase_dist, phase_bins, features)
+
+     if set_orientation is not None:
+         ori = np.ones((features,)) * set_orientation
+     else:
+         ori = sample_dist(ori_dist, ori_bins, features)
+
+     sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
+     sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
+
+     sf_bins = sf_bins[sfmin_ind:sfmax_ind + 1]
+     sf_s_dist = sf_s_dist[sfmin_ind:sfmax_ind]
+     sf_c_dist = sf_c_dist[sfmin_ind:sfmax_ind]
+
+     sf_s_dist = sf_s_dist / sf_s_dist.sum()
+     sf_c_dist = sf_c_dist / sf_c_dist.sum()
+
+     cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])
+
+     if rand_flag:  # Uniform
+         samps = np.random.multivariate_normal([0, 0], cov_mat, features)
+         samps_cdf = stats.norm.cdf(samps)
+
+         nx = np.interp(samps_cdf[:, 0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
+         nx = 10**nx
+
+         if diff_n:
+             ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
+         else:
+             ny = 10**(np.random.normal(np.log10(nx), dnstd))
+             ny[ny < 0.1] = 0.1
+             ny[ny > 1] = 1
+             # ny = nx
+
+         sf = np.interp(samps_cdf[:, 1], np.hstack(([0], sf_s_dist.cumsum())), np.log2(sf_bins))
+         sf = 2**sf
+
+     else:  # Biological
+
+         if n_sc > 0:
+             samps = np.random.multivariate_normal([0, 0], cov_mat, n_sc)
+             samps_cdf = stats.norm.cdf(samps)
+
+             nx_s = np.interp(samps_cdf[:, 0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
+             nx_s = 10**nx_s
+
+             ny_samp = np.random.rand(n_sc)
+             ny_s = np.zeros(n_sc)
+             for samp_ind, nx_samp in enumerate(nx_s):
+                 bin_id = np.argwhere(nx_bins < nx_samp)[-1]
+                 ny_s[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
+                                            np.log10(ny_bins))
+             ny_s = 10**ny_s
+
+             sf_s = np.interp(samps_cdf[:, 1], np.hstack(([0], sf_s_dist.cumsum())), np.log2(sf_bins))
+             sf_s = 2**sf_s
+         else:
+             nx_s = np.array([])
+             ny_s = np.array([])
+             sf_s = np.array([])
+
+         if n_cc > 0:
+             samps = np.random.multivariate_normal([0, 0], cov_mat, n_cc)
+             samps_cdf = stats.norm.cdf(samps)
+
+             nx_c = np.interp(samps_cdf[:, 0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
+             nx_c = 10**nx_c
+
+             ny_samp = np.random.rand(n_cc)
+             ny_c = np.zeros(n_cc)
+             for samp_ind, nx_samp in enumerate(nx_c):
+                 bin_id = np.argwhere(nx_bins < nx_samp)[-1]
+                 ny_c[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
+                                            np.log10(ny_bins))
+             ny_c = 10**ny_c
+
+             sf_c = np.interp(samps_cdf[:, 1], np.hstack(([0], sf_c_dist.cumsum())), np.log2(sf_bins))
+             sf_c = 2**sf_c
+         else:
+             nx_c = np.array([])
+             ny_c = np.array([])
+             sf_c = np.array([])
+
+         nx = np.concatenate((nx_s, nx_c))
+         ny = np.concatenate((ny_s, ny_c))
+         sf = np.concatenate((sf_s, sf_c))
+
+     color = np.random.randint(low=0, high=in_channels, size=features, dtype=np.int8)
+
+     return sf, ori, phase, nx, ny, color
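
The parameter generators above are easiest to read through a concrete call. A minimal sketch, not part of the diff, assuming this hunk is the evnet params module (the file name is not shown here, so the import path is an assumption): with the default colors and median sampling, get_dog_params returns per-channel tensors already converted to pixel units.

from evnet.params import get_dog_params  # module path assumed

params = get_dog_params(features=3, sampling='median', cell_type='p')
# Center radius: 0.03 deg * (224 px / 7 deg) = 0.96 px per channel
print(params['rc_p_cell'])               # tensor([0.96, 0.96, 0.96], ...)
print(params['opponency_p_cell'].shape)  # torch.Size([3, 2, 3]): channel x (center, surround) x RGB
print(params['kernel_p_cell'])           # 21 (DoG kernel size for P cells)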
@@ -0,0 +1,142 @@
+ import math
+ import torch
+ import numpy as np
+ import random
+
+
+ def gaussian_kernel(
+         sigma: float, k: float = 1, size: int = 15, norm: bool = False
+ ) -> torch.Tensor:
+     """Returns a 2D Gaussian kernel.
+
+     :param sigma (float): standard deviation of the Gaussian
+     :param k (float, optional): height of the Gaussian
+     :param size (int, optional): kernel size
+     :param norm (bool, optional): whether to normalize the kernel
+     :return: Gaussian kernel
+     """
+     assert size % 2 == 1
+     w = size // 2
+     grid_val = torch.arange(-w, w + 1, dtype=torch.float)
+     x, y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+     gaussian = k * torch.exp(-(x**2 + y**2) / (2 * sigma**2))
+     if norm:
+         gaussian /= torch.abs(gaussian.sum())
+     return gaussian
+
+
+ def dog_kernel(
+         sigma_c: float, sigma_s: float, k_c: float, k_s: float,
+         polarity: int, size: int = 21
+ ) -> torch.Tensor:
+     """Returns a 2D Difference-of-Gaussians kernel.
+
+     :param sigma_c: standard deviation of the center Gaussian
+     :param sigma_s: standard deviation of the surround Gaussian
+     :param k_c: peak sensitivity of the center
+     :param k_s: peak sensitivity of the surround
+     :param polarity: polarity of the center Gaussian (+1 or -1)
+     :param size: kernel size
+     :return: difference-of-gaussians kernel
+     """
+     assert size % 2 == 1
+     assert polarity in [-1, 1]
+     center_gaussian = gaussian_kernel(sigma=sigma_c, k=k_c, size=size)
+     surround_gaussian = gaussian_kernel(sigma=sigma_s, k=k_s, size=size)
+     dog = polarity * (center_gaussian - surround_gaussian)
+     dog /= torch.sum(dog)
+     return dog
+
+
+ def circular_kernel(size: int, radius: float) -> torch.Tensor:
+     """Returns a circular (disk) kernel.
+
+     :param size (int): kernel size
+     :param radius (float): radius of the circle
+     :return: circular kernel
+     """
+     w = size // 2
+     grid_val = torch.arange(-w, w + 1, dtype=torch.float)
+     x, y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+     kernel = torch.zeros(y.shape)
+     kernel[torch.sqrt(x**2 + y**2) <= radius] = 1
+     kernel /= torch.sum(kernel)
+     return kernel
+
+
+ def gabor_kernel(
+         frequency: float, sigma_x: float, sigma_y: float,
+         theta: float = 0, offset: float = 0, ks: int = 61
+ ):
+     """Returns a Gabor kernel.
+
+     :param frequency (float): spatial frequency of the Gabor
+     :param sigma_x (float): standard deviation in x direction
+     :param sigma_y (float): standard deviation in y direction
+     :param theta (float, optional): angle theta. Defaults to 0.
+     :param offset (float, optional): phase offset. Defaults to 0.
+     :param ks (int, optional): kernel size. Defaults to 61.
+     :return: 2-dimensional Gabor kernel
+     """
+     w = ks // 2
+     grid_val = torch.arange(-w, w + 1, dtype=torch.float)
+     x, y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+     rotx = x * np.cos(theta) + y * np.sin(theta)
+     roty = -x * np.sin(theta) + y * np.cos(theta)
+     g = torch.zeros(y.shape)
+     g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
+     g /= 2 * np.pi * sigma_x * sigma_y
+     g *= torch.cos(2 * np.pi * frequency * rotx + offset)
+     return g
+
+
+ def generate_grating(
+         size: int, radius: float, sf: float, theta: float = 0, phase: float = 0,
+         contrast: float = 1, gaussian_mask: bool = False
+ ) -> torch.Tensor:
+     """Returns a masked grating array.
+
+     :param size (int): kernel size
+     :param radius (float): standard deviation times sqrt(2) of the mask if gaussian_mask is True, the radius otherwise
+     :param sf (float): spatial frequency of the grating
+     :param theta (float, optional): angle of the grating
+     :param phase (float, optional): phase of the grating
+     :param contrast (float, optional): maximum contrast of the grating
+     :param gaussian_mask (bool, optional): mask is a Gaussian if True and a circle if False
+     :return: 2D masked grating array
+     """
+     grid_val = torch.linspace(-size//2, size//2 + 1, size, dtype=torch.float)
+     X, Y = torch.meshgrid(grid_val, grid_val, indexing='ij')
+     grating = torch.sin(2 * math.pi * sf * (X * math.cos(theta) + Y * math.sin(theta)) + phase) * contrast
+     mask = torch.exp(-((X**2 + Y**2) / (2 * (radius / np.sqrt(2))**2))) if gaussian_mask else torch.sqrt(X**2 + Y**2) <= radius
+     return grating * mask * .5 + .5
+
+
+ def sample_dist(hist: np.ndarray, bins: np.ndarray, ns: int, scale: str = 'linear'):
+     """Samples from distributions with different scales via inverse-CDF interpolation.
+
+     Args:
+         hist (np.ndarray): histogram (bin probabilities)
+         bins (np.ndarray): bin edges
+         ns (int): number of samples
+         scale (str, optional): distribution scale ('linear', 'log2' or 'log10'). Defaults to 'linear'.
+
+     :returns rand_sample (np.ndarray): random sample drawn from the histogram
+     """
+     rand_sample = np.random.rand(ns)
+     if scale == 'linear':
+         rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins)
+     elif scale == 'log2':
+         rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins))
+         rand_sample = 2**rand_sample
+     elif scale == 'log10':
+         rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins))
+         rand_sample = 10**rand_sample
+     return rand_sample
+
+
+ def set_seed(seed):
+     """Enforces deterministic behaviour and sets RNG seed for numpy and pytorch.
+
+     :param seed (int): seed
+     """
+     random.seed(seed)
+     np.random.seed(seed)  # seed numpy's global RNG as well, per the docstring
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed_all(seed)
+     torch.backends.cudnn.deterministic = True
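
A quick sanity check of the kernel helpers (a sketch assuming the utils module above is importable at the path shown; not part of the diff): dog_kernel divides by the kernel sum, so any valid parameterization integrates to one.

from evnet.utils import dog_kernel  # module path assumed

k = dog_kernel(sigma_c=2.0, sigma_s=6.0, k_c=1.0, k_s=0.2, polarity=1, size=21)
assert k.shape == (21, 21)
print(round(float(k.sum()), 6))  # 1.0, since dog_kernel normalizes by its sum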
@@ -0,0 +1,46 @@
+ import functools
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.check_submission import check_models
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from brainscore_vision.model_helpers.s3 import load_weight_file
+ import torch
+ from .evnet.evnet import EVNet
+
+
+ def get_model(name):
+     assert name == 'resnet_50_2'
+     model = EVNet(
+         with_retinablock=False, with_voneblock=False,
+         model_arch='resnet50', image_size=224, num_classes=1000
+     )
+     weight_file = load_weight_file(
+         bucket="evnets-model-weights",
+         relative_path="resnet_50_1.pth",
+         sha1="aad77aaf2213858ef49c36b97d7b52855dc6e168",
+         version_id="null"
+     )
+     model.to(torch.device('cpu'))
+     checkpoint = torch.load(weight_file, map_location=torch.device('cpu'))
+     model.load_state_dict(checkpoint['model'])
+     preprocessing = functools.partial(
+         load_preprocess_images,
+         image_size=224,
+         normalize_mean=(.5, .5, .5),
+         normalize_std=(.5, .5, .5)
+     )
+     wrapper = PytorchWrapper(
+         identifier='resnet_50_2',
+         model=model, preprocessing=preprocessing
+     )
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     assert name == 'resnet_50_2'
+     return ['model.conv1', 'model.layer1', 'model.layer2', 'model.layer3', 'model.layer4', 'model.fc']
+
+
+ def get_bibtex(model_identifier):
+     return """"""
+
+
+ if __name__ == '__main__':
+     check_models.check_base_models(__name__)
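
Assuming an accompanying __init__.py registers the 'resnet_50_2' identifier (as the sibling models in this diff do), the wrapper can be smoke-tested the same way the test files in this release do; a sketch:

import brainscore_vision

model = brainscore_vision.load_model('resnet_50_2')  # requires the registry entry
assert model.identifier == 'resnet_50_2'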
@@ -0,0 +1,6 @@
+ {
+     "V1": "model.layer2",
+     "V2": "model.layer2",
+     "V4": "model.layer2",
+     "IT": "model.layer3"
+ }
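
The region_layer_map above pins one readout layer per cortical region, so scoring uses these layers directly instead of searching. A sketch of inspecting such a file (the filename is assumed; it is not shown in this diff):

import json

with open('region_layer_map.json') as f:  # filename assumed
    region_layer_map = json.load(f)
print(region_layer_map['IT'])  # 'model.layer3'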
@@ -0,0 +1,4 @@
+ torch
+ torchvision
+ numpy
+ scipy
@@ -0,0 +1,8 @@
+ import pytest
+ import brainscore_vision
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('resnet_50_2')
+     assert model.identifier == 'resnet_50_2'
@@ -16,9 +16,9 @@ def get_model(name):
      model_ctr = getattr(module, 'resnet50')
      model = model_ctr()
      preprocessing = functools.partial(load_preprocess_images, image_size=224)
-     weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models",
+     weights_path = load_weight_file(bucket="brainscore-storage", folder_name="brainscore-vision/models",
                                      relative_path="resnet-50-robust/ImageNet.pt",
-                                     version_id=".shHB0L_L9L3Mtco0Kf4EBP3Xj9nLKnC",
+                                     version_id="null",
                                      sha1="cc6e4441abc8ad6d2f4da5db84836e544bfb53fd")
      checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
@@ -0,0 +1 @@
+ {"V2": "layer4.0.downsample.0", "IT": "layer4.0.downsample.0", "V4": "layer3.0.downsample.0", "V1": "layer3.0.downsample.0"}
@@ -0,0 +1,5 @@
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['resnet_50_v1'] = lambda: ModelCommitment(identifier='resnet_50_v1', activations_model=get_model('resnet_50_v1'), layers=get_layers('resnet_50_v1'))
@@ -0,0 +1,42 @@
+ from torchvision.models import resnet50
+ from brainscore_vision.model_helpers.check_submission import check_models
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ import functools
+
+
+ model = resnet50(weights='IMAGENET1K_V1')
+
+
+ def get_model(name):
+     assert name == 'resnet_50_v1'
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = PytorchWrapper(identifier='resnet_50_v1', model=model, preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     assert name == 'resnet_50_v1'
+     units = [3, 4, 6, 3]
+     layer_names = ['conv1'] + [f'layer{block+1}.{unit}' for block, block_units in
+                                enumerate(units) for unit in range(block_units)] + ['avgpool']
+     return layer_names
+
+
+ def get_bibtex(model_identifier):
+     assert model_identifier == 'resnet_50_v1'
+     return """
+ @inproceedings{he2016deep,
+     title={Deep residual learning for image recognition},
+     author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
+     booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+     pages={770--778},
+     year={2016}
+ }"""
+
+
+ if __name__ == '__main__':
+     # Use this method to ensure the correctness of the BaseModel implementations.
+     # It executes a mock run of brain-score benchmarks.
+     check_models.check_base_models(__name__)
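
For reference, the comprehension in get_layers expands to the 16 residual blocks of a ResNet-50 bracketed by conv1 and avgpool; a quick check (the import path is assumed from the identifier, not shown in this diff):

from brainscore_vision.models.resnet_50_v1.model import get_layers  # path assumed

layers = get_layers('resnet_50_v1')
print(layers[:3])   # ['conv1', 'layer1.0', 'layer1.1']
print(layers[-2:])  # ['layer4.2', 'avgpool']
print(len(layers))  # 18 = 1 + (3 + 4 + 6 + 3) + 1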
@@ -0,0 +1,6 @@
+ {
+     "V1": "layer2.3",
+     "V2": "layer3.0",
+     "V4": "layer2.3",
+     "IT": "layer4.0"
+ }
@@ -0,0 +1 @@
+ torchvision
@@ -0,0 +1,8 @@
+ import pytest
+ import brainscore_vision
+
+
+ @pytest.mark.travis_slow
+ def test_has_identifier():
+     model = brainscore_vision.load_model('resnet_50_v1')
+     assert model.identifier == 'resnet_50_v1'
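
These registry smoke tests are marked travis_slow and can be selected by marker when run locally; a sketch (the test path is assumed):

import pytest

pytest.main(['-m', 'travis_slow', 'brainscore_vision/models/resnet_50_v1/test.py'])  # path assumed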
@@ -0,0 +1,8 @@
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+
+ model_registry['resnet_50_v2'] = lambda: ModelCommitment(identifier='resnet_50_v2',
+                                                          activations_model=get_model('resnet_50_v2'),
+                                                          layers=get_layers('resnet_50_v2'))
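
Note that the registry stores a zero-argument callable rather than a constructed model, so the ModelCommitment is only built when the entry is invoked (which brainscore_vision.load_model does on lookup); a sketch:

from brainscore_vision import model_registry

commitment = model_registry['resnet_50_v2']()  # construction happens only at invocation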
@@ -0,0 +1,33 @@
+ import functools
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ import torchvision
+ import ssl
+
+
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+ '''
+ This is a PyTorch implementation of resnet50.
+ The model template can be found at the following URL:
+ https://pytorch.org/vision/main/models/generated/torchvision.models.resnet50.html
+ '''
+
+ MODEL = torchvision.models.resnet50(weights='ResNet50_Weights.IMAGENET1K_V2')  # use V2 weights
+
+
+ def get_model(name):
+     assert name == 'resnet_50_v2'
+     preprocessing = functools.partial(load_preprocess_images, image_size=224, preprocess_type='inception')
+     wrapper = PytorchWrapper(identifier=name, model=MODEL, preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     assert name == 'resnet_50_v2'
+     layer_names = (['conv1'] + [f'layer1.{i}' for i in range(3)] +
+                    [f'layer2.{i}' for i in range(4)] +
+                    [f'layer3.{i}' for i in range(6)] +
+                    [f'layer4.{i}' for i in range(3)] + ['avgpool'])
+     return layer_names
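
The string 'ResNet50_Weights.IMAGENET1K_V2' is resolved by torchvision (0.13+) to the corresponding weights enum; the bare member name is accepted as well. A sketch of the equivalent spellings:

from torchvision.models import resnet50, ResNet50_Weights

m1 = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)  # enum directly
m2 = resnet50(weights='IMAGENET1K_V2')                 # member name as string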
@@ -0,0 +1,6 @@
+ {
+     "V1": "layer1.0",
+     "V2": "layer3.0",
+     "V4": "layer2.3",
+     "IT": "layer4.0"
+ }
@@ -0,0 +1,2 @@
+ torch
+ torchvision