brainscore-vision 2.1__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (1009) hide show
  1. brainscore_vision/__init__.py +105 -0
  2. brainscore_vision/__main__.py +20 -0
  3. brainscore_vision/benchmark_helpers/__init__.py +67 -0
  4. brainscore_vision/benchmark_helpers/neural_common.py +70 -0
  5. brainscore_vision/benchmark_helpers/properties_common.py +424 -0
  6. brainscore_vision/benchmark_helpers/screen.py +126 -0
  7. brainscore_vision/benchmark_helpers/test_helper.py +160 -0
  8. brainscore_vision/benchmarks/README.md +7 -0
  9. brainscore_vision/benchmarks/__init__.py +122 -0
  10. brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
  11. brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
  12. brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
  13. brainscore_vision/benchmarks/baker2022/test.py +90 -0
  14. brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
  15. brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
  16. brainscore_vision/benchmarks/bmd2024/test.py +29 -0
  17. brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
  18. brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
  19. brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
  20. brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
  21. brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
  22. brainscore_vision/benchmarks/cadena2017/test.py +35 -0
  23. brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
  24. brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
  25. brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
  26. brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
  27. brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
  28. brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
  29. brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
  30. brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
  31. brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
  32. brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
  33. brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
  34. brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
  35. brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
  36. brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
  37. brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
  38. brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
  39. brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
  40. brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
  41. brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
  42. brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
  43. brainscore_vision/benchmarks/hebart2023/test.py +19 -0
  44. brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
  45. brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
  46. brainscore_vision/benchmarks/hermann2020/test.py +28 -0
  47. brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
  48. brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
  49. brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
  50. brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
  51. brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
  52. brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
  53. brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
  54. brainscore_vision/benchmarks/imagenet/test.py +32 -0
  55. brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
  56. brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
  57. brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
  58. brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
  59. brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
  60. brainscore_vision/benchmarks/islam2021/test.py +47 -0
  61. brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
  62. brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
  63. brainscore_vision/benchmarks/kar2019/test.py +93 -0
  64. brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
  65. brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
  66. brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
  67. brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
  68. brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
  69. brainscore_vision/benchmarks/malania2007/test.py +64 -0
  70. brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
  71. brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
  72. brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
  73. brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
  74. brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
  75. brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
  76. brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
  77. brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
  78. brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
  79. brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
  80. brainscore_vision/benchmarks/marques2020/test.py +135 -0
  81. brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
  82. brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
  83. brainscore_vision/benchmarks/objectnet/test.py +33 -0
  84. brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
  85. brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
  86. brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
  87. brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
  88. brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
  89. brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
  90. brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
  91. brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
  92. brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
  93. brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
  94. brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
  95. brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
  96. brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
  97. brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
  98. brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
  99. brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
  100. brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
  101. brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
  102. brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
  103. brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
  104. brainscore_vision/benchmarks/scialom2024/test.py +162 -0
  105. brainscore_vision/data/__init__.py +0 -0
  106. brainscore_vision/data/baker2022/__init__.py +40 -0
  107. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
  108. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
  109. brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
  110. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
  111. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
  112. brainscore_vision/data/baker2022/test.py +135 -0
  113. brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
  114. brainscore_vision/data/barbumayo2019/__init__.py +23 -0
  115. brainscore_vision/data/barbumayo2019/test.py +10 -0
  116. brainscore_vision/data/bashivankar2019/__init__.py +52 -0
  117. brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
  118. brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
  119. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
  120. brainscore_vision/data/bashivankar2019/test.py +15 -0
  121. brainscore_vision/data/bmd2024/__init__.py +69 -0
  122. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
  123. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
  124. brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
  125. brainscore_vision/data/bmd2024/test.py +130 -0
  126. brainscore_vision/data/bracci2019/__init__.py +36 -0
  127. brainscore_vision/data/bracci2019/data_packaging.py +221 -0
  128. brainscore_vision/data/bracci2019/test.py +16 -0
  129. brainscore_vision/data/cadena2017/__init__.py +52 -0
  130. brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
  131. brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
  132. brainscore_vision/data/cadena2017/test.py +24 -0
  133. brainscore_vision/data/cichy2019/__init__.py +38 -0
  134. brainscore_vision/data/cichy2019/test.py +8 -0
  135. brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
  136. brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
  137. brainscore_vision/data/coggan2024_behavior/test.py +32 -0
  138. brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
  139. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
  140. brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
  141. brainscore_vision/data/david2004/__init__.py +34 -0
  142. brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
  143. brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
  144. brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
  145. brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
  146. brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
  147. brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
  148. brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
  149. brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
  150. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
  151. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
  152. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
  153. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
  154. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
  155. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
  156. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
  157. brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
  158. brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
  159. brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
  160. brainscore_vision/data/david2004/test.py +8 -0
  161. brainscore_vision/data/deng2009/__init__.py +22 -0
  162. brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
  163. brainscore_vision/data/deng2009/test.py +9 -0
  164. brainscore_vision/data/ferguson2024/__init__.py +401 -0
  165. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
  166. brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
  167. brainscore_vision/data/ferguson2024/requirements.txt +2 -0
  168. brainscore_vision/data/ferguson2024/test.py +155 -0
  169. brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
  170. brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
  171. brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
  172. brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
  173. brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
  174. brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
  175. brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
  176. brainscore_vision/data/freemanziemba2013/test.py +97 -0
  177. brainscore_vision/data/geirhos2021/__init__.py +358 -0
  178. brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
  179. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
  180. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
  181. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
  182. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
  183. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
  184. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
  185. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
  186. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
  187. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
  188. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
  189. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
  190. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
  191. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
  192. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
  193. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
  194. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
  195. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
  196. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
  197. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
  198. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
  199. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
  200. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
  201. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
  202. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
  203. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
  204. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
  205. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
  206. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
  207. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
  208. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
  209. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
  210. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
  211. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
  212. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
  213. brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
  214. brainscore_vision/data/geirhos2021/test.py +330 -0
  215. brainscore_vision/data/hebart2023/__init__.py +23 -0
  216. brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
  217. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
  218. brainscore_vision/data/hebart2023/test.py +42 -0
  219. brainscore_vision/data/hendrycks2019/__init__.py +45 -0
  220. brainscore_vision/data/hendrycks2019/test.py +26 -0
  221. brainscore_vision/data/igustibagus2024/__init__.py +23 -0
  222. brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
  223. brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
  224. brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
  225. brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
  226. brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
  227. brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
  228. brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
  229. brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
  230. brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
  231. brainscore_vision/data/igustibagus2024/test.py +26 -0
  232. brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
  233. brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
  234. brainscore_vision/data/imagenetslim15000/test.py +8 -0
  235. brainscore_vision/data/islam2021/__init__.py +18 -0
  236. brainscore_vision/data/islam2021/data_packaging.py +64 -0
  237. brainscore_vision/data/islam2021/test.py +11 -0
  238. brainscore_vision/data/kar2018/__init__.py +58 -0
  239. brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
  240. brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
  241. brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
  242. brainscore_vision/data/kar2018/test.py +10 -0
  243. brainscore_vision/data/kar2019/__init__.py +43 -0
  244. brainscore_vision/data/kar2019/data_packaging.py +116 -0
  245. brainscore_vision/data/kar2019/test.py +8 -0
  246. brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
  247. brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
  248. brainscore_vision/data/kuzovkin2018/test.py +8 -0
  249. brainscore_vision/data/majajhong2015/__init__.py +113 -0
  250. brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
  251. brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
  252. brainscore_vision/data/majajhong2015/test.py +38 -0
  253. brainscore_vision/data/malania2007/__init__.py +254 -0
  254. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
  255. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
  256. brainscore_vision/data/malania2007/test.py +147 -0
  257. brainscore_vision/data/maniquet2024/__init__.py +57 -0
  258. brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
  259. brainscore_vision/data/maniquet2024/test.py +16 -0
  260. brainscore_vision/data/marques2020/__init__.py +123 -0
  261. brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
  262. brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
  263. brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
  264. brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
  265. brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
  266. brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
  267. brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
  268. brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
  269. brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
  270. brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
  271. brainscore_vision/data/marques2020/test.py +54 -0
  272. brainscore_vision/data/rajalingham2018/__init__.py +56 -0
  273. brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
  274. brainscore_vision/data/rajalingham2018/test.py +10 -0
  275. brainscore_vision/data/rajalingham2020/__init__.py +39 -0
  276. brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
  277. brainscore_vision/data/rajalingham2020/test.py +8 -0
  278. brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
  279. brainscore_vision/data/rust2012/__init__.py +45 -0
  280. brainscore_vision/data/rust2012/rust305.py +35 -0
  281. brainscore_vision/data/rust2012/test.py +47 -0
  282. brainscore_vision/data/sanghavi2020/__init__.py +119 -0
  283. brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
  284. brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
  285. brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
  286. brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
  287. brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
  288. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
  289. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
  290. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
  291. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
  292. brainscore_vision/data/sanghavi2020/test.py +13 -0
  293. brainscore_vision/data/scialom2024/__init__.py +386 -0
  294. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
  295. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
  296. brainscore_vision/data/scialom2024/test.py +301 -0
  297. brainscore_vision/data/seibert2019/__init__.py +25 -0
  298. brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
  299. brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
  300. brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
  301. brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
  302. brainscore_vision/data/seibert2019/test.py +35 -0
  303. brainscore_vision/data/zhang2018/__init__.py +38 -0
  304. brainscore_vision/data/zhang2018/test.py +29 -0
  305. brainscore_vision/data_helpers/__init__.py +0 -0
  306. brainscore_vision/data_helpers/lookup_legacy.py +15 -0
  307. brainscore_vision/data_helpers/s3.py +79 -0
  308. brainscore_vision/metric_helpers/__init__.py +5 -0
  309. brainscore_vision/metric_helpers/temporal.py +119 -0
  310. brainscore_vision/metric_helpers/transformations.py +379 -0
  311. brainscore_vision/metric_helpers/utils.py +71 -0
  312. brainscore_vision/metric_helpers/xarray_utils.py +151 -0
  313. brainscore_vision/metrics/__init__.py +7 -0
  314. brainscore_vision/metrics/accuracy/__init__.py +4 -0
  315. brainscore_vision/metrics/accuracy/metric.py +16 -0
  316. brainscore_vision/metrics/accuracy/test.py +11 -0
  317. brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
  318. brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
  319. brainscore_vision/metrics/accuracy_distance/test.py +57 -0
  320. brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
  321. brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
  322. brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
  323. brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
  324. brainscore_vision/metrics/cka/__init__.py +14 -0
  325. brainscore_vision/metrics/cka/metric.py +105 -0
  326. brainscore_vision/metrics/cka/test.py +28 -0
  327. brainscore_vision/metrics/dimensionality/__init__.py +13 -0
  328. brainscore_vision/metrics/dimensionality/metric.py +45 -0
  329. brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
  330. brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
  331. brainscore_vision/metrics/distribution_similarity/test.py +10 -0
  332. brainscore_vision/metrics/error_consistency/__init__.py +13 -0
  333. brainscore_vision/metrics/error_consistency/metric.py +93 -0
  334. brainscore_vision/metrics/error_consistency/test.py +39 -0
  335. brainscore_vision/metrics/i1i2/__init__.py +16 -0
  336. brainscore_vision/metrics/i1i2/metric.py +299 -0
  337. brainscore_vision/metrics/i1i2/requirements.txt +2 -0
  338. brainscore_vision/metrics/i1i2/test.py +36 -0
  339. brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
  340. brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
  341. brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
  342. brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
  343. brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
  344. brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
  345. brainscore_vision/metrics/internal_consistency/test.py +39 -0
  346. brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
  347. brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
  348. brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
  349. brainscore_vision/metrics/mask_regression/__init__.py +16 -0
  350. brainscore_vision/metrics/mask_regression/metric.py +242 -0
  351. brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
  352. brainscore_vision/metrics/mask_regression/test.py +0 -0
  353. brainscore_vision/metrics/ost/__init__.py +23 -0
  354. brainscore_vision/metrics/ost/metric.py +350 -0
  355. brainscore_vision/metrics/ost/requirements.txt +2 -0
  356. brainscore_vision/metrics/ost/test.py +0 -0
  357. brainscore_vision/metrics/rdm/__init__.py +14 -0
  358. brainscore_vision/metrics/rdm/metric.py +101 -0
  359. brainscore_vision/metrics/rdm/requirements.txt +2 -0
  360. brainscore_vision/metrics/rdm/test.py +63 -0
  361. brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
  362. brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
  363. brainscore_vision/metrics/regression_correlation/metric.py +125 -0
  364. brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
  365. brainscore_vision/metrics/regression_correlation/test.py +36 -0
  366. brainscore_vision/metrics/threshold/__init__.py +5 -0
  367. brainscore_vision/metrics/threshold/metric.py +481 -0
  368. brainscore_vision/metrics/threshold/test.py +71 -0
  369. brainscore_vision/metrics/value_delta/__init__.py +4 -0
  370. brainscore_vision/metrics/value_delta/metric.py +30 -0
  371. brainscore_vision/metrics/value_delta/requirements.txt +1 -0
  372. brainscore_vision/metrics/value_delta/test.py +40 -0
  373. brainscore_vision/model_helpers/__init__.py +3 -0
  374. brainscore_vision/model_helpers/activations/__init__.py +1 -0
  375. brainscore_vision/model_helpers/activations/core.py +635 -0
  376. brainscore_vision/model_helpers/activations/pca.py +117 -0
  377. brainscore_vision/model_helpers/activations/pytorch.py +152 -0
  378. brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
  379. brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
  380. brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
  381. brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
  382. brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
  383. brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
  384. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
  385. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
  386. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
  387. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
  388. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
  389. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
  390. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
  391. brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
  392. brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
  393. brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
  394. brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
  395. brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
  396. brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
  397. brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
  398. brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
  399. brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
  400. brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
  401. brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
  402. brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
  403. brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
  404. brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
  405. brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
  406. brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
  407. brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
  408. brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
  409. brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
  410. brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
  411. brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
  412. brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
  413. brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
  414. brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
  415. brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
  416. brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
  417. brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
  418. brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
  419. brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
  420. brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
  421. brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
  422. brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
  423. brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
  424. brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
  425. brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
  426. brainscore_vision/model_helpers/conftest.py +3 -0
  427. brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
  428. brainscore_vision/model_helpers/s3.py +62 -0
  429. brainscore_vision/model_helpers/utils/__init__.py +15 -0
  430. brainscore_vision/model_helpers/utils/s3.py +42 -0
  431. brainscore_vision/model_interface.py +214 -0
  432. brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
  433. brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
  434. brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
  435. brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
  436. brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
  437. brainscore_vision/models/AlexNet_SIN/model.py +29 -0
  438. brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
  439. brainscore_vision/models/AlexNet_SIN/test.py +1 -0
  440. brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
  441. brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
  442. brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
  443. brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
  444. brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
  445. brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
  446. brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
  447. brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
  448. brainscore_vision/models/__init__.py +0 -0
  449. brainscore_vision/models/alexnet/__init__.py +8 -0
  450. brainscore_vision/models/alexnet/model.py +28 -0
  451. brainscore_vision/models/alexnet/requirements.txt +2 -0
  452. brainscore_vision/models/alexnet/test.py +15 -0
  453. brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
  454. brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
  455. brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
  456. brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
  457. brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
  458. brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
  459. brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
  460. brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
  461. brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
  462. brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
  463. brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
  464. brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
  465. brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
  466. brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
  467. brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
  468. brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
  469. brainscore_vision/models/alexnet_testing/__init__.py +8 -0
  470. brainscore_vision/models/alexnet_testing/model.py +28 -0
  471. brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
  472. brainscore_vision/models/alexnet_testing/setup.py +24 -0
  473. brainscore_vision/models/alexnet_testing/test.py +15 -0
  474. brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
  475. brainscore_vision/models/antialias_resnet152/model.py +35 -0
  476. brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
  477. brainscore_vision/models/antialias_resnet152/test.py +8 -0
  478. brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
  479. brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
  480. brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
  481. brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
  482. brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
  483. brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
  484. brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
  485. brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
  486. brainscore_vision/models/clip/__init__.py +5 -0
  487. brainscore_vision/models/clip/model.py +179 -0
  488. brainscore_vision/models/clip/requirements.txt +4 -0
  489. brainscore_vision/models/clip/test.py +1 -0
  490. brainscore_vision/models/clipvision/__init__.py +5 -0
  491. brainscore_vision/models/clipvision/model.py +179 -0
  492. brainscore_vision/models/clipvision/requirements.txt +4 -0
  493. brainscore_vision/models/clipvision/test.py +1 -0
  494. brainscore_vision/models/cornet_s/__init__.py +8 -0
  495. brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
  496. brainscore_vision/models/cornet_s/model.py +77 -0
  497. brainscore_vision/models/cornet_s/requirements.txt +7 -0
  498. brainscore_vision/models/cornet_s/test.py +8 -0
  499. brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
  500. brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
  501. brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
  502. brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
  503. brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
  504. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
  505. brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
  506. brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
  507. brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
  508. brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
  509. brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
  510. brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
  511. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
  512. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
  513. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
  514. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
  515. brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
  516. brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
  517. brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
  518. brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
  519. brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
  520. brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
  521. brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
  522. brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
  523. brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
  524. brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
  525. brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
  526. brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
  527. brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
  528. brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
  529. brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
  530. brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
  531. brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
  532. brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
  533. brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
  534. brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
  535. brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
  536. brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
  537. brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
  538. brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
  539. brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
  540. brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
  541. brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
  542. brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
  543. brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
  544. brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
  545. brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
  546. brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
  547. brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
  548. brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
  549. brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
  550. brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
  551. brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
  552. brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
  553. brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
  554. brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
  555. brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
  556. brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
  557. brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
  558. brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
  559. brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
  560. brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
  561. brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
  562. brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
  563. brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
  564. brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
  565. brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
  566. brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
  567. brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
  568. brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
  569. brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
  570. brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
  571. brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
  572. brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
  573. brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
  574. brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
  575. brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
  576. brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
  577. brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
  578. brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
  579. brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
  580. brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
  581. brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
  582. brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
  583. brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
  584. brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
  585. brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
  586. brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
  587. brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
  588. brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
  589. brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
  590. brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
  591. brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
  592. brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
  593. brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
  594. brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
  595. brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
  596. brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
  597. brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
  598. brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
  599. brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
  600. brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
  601. brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
  602. brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
  603. brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
  604. brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
  605. brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
  606. brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
  607. brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
  608. brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
  609. brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
  610. brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
  611. brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
  612. brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
  613. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
  614. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
  615. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
  616. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
  617. brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
  618. brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
  619. brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
  620. brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
  621. brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
  622. brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
  623. brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
  624. brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
  625. brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
  626. brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
  627. brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
  628. brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
  629. brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
  630. brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
  631. brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
  632. brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
  633. brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
  634. brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
  635. brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
  636. brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
  637. brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
  638. brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
  639. brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
  640. brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
  641. brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
  642. brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
  643. brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
  644. brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
  645. brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
  646. brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
  647. brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
  648. brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
  649. brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
  650. brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
  651. brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
  652. brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
  653. brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
  654. brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
  655. brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
  656. brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
  657. brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
  658. brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
  659. brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
  660. brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
  661. brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
  662. brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
  663. brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
  664. brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
  665. brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
  666. brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
  667. brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
  668. brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
  669. brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
  670. brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
  671. brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
  672. brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
  673. brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
  674. brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
  675. brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
  676. brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
  677. brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
  678. brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
  679. brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
  680. brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
  681. brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
  682. brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
  683. brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
  684. brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
  685. brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
  686. brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
  687. brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
  688. brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
  689. brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
  690. brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
  691. brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
  692. brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
  693. brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
  694. brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
  695. brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
  696. brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
  697. brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
  698. brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
  699. brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
  700. brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
  701. brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
  702. brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
  703. brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
  704. brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
  705. brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
  706. brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
  707. brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
  708. brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
  709. brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
  710. brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
  711. brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
  712. brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
  713. brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
  714. brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
  715. brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
  716. brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
  717. brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
  718. brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
  719. brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
  720. brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
  721. brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
  722. brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
  723. brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
  724. brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
  725. brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
  726. brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
  727. brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
  728. brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
  729. brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
  730. brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
  731. brainscore_vision/models/effnetb1_272x240/model.py +126 -0
  732. brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
  733. brainscore_vision/models/effnetb1_272x240/test.py +9 -0
  734. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
  735. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
  736. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
  737. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
  738. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
  739. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
  740. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
  741. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
  742. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
  743. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
  744. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
  745. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
  746. brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
  747. brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
  748. brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
  749. brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
  750. brainscore_vision/models/hmax/__init__.py +7 -0
  751. brainscore_vision/models/hmax/helpers/hmax.py +438 -0
  752. brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
  753. brainscore_vision/models/hmax/model.py +69 -0
  754. brainscore_vision/models/hmax/requirements.txt +5 -0
  755. brainscore_vision/models/hmax/test.py +8 -0
  756. brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
  757. brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
  758. brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
  759. brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
  760. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
  761. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
  762. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
  763. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
  764. brainscore_vision/models/mobilevit_small/__init__.py +7 -0
  765. brainscore_vision/models/mobilevit_small/model.py +49 -0
  766. brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
  767. brainscore_vision/models/mobilevit_small/test.py +8 -0
  768. brainscore_vision/models/pixels/__init__.py +8 -0
  769. brainscore_vision/models/pixels/model.py +35 -0
  770. brainscore_vision/models/pixels/test.py +15 -0
  771. brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
  772. brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
  773. brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
  774. brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
  775. brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
  776. brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
  777. brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
  778. brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
  779. brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
  780. brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
  781. brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
  782. brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
  783. brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
  784. brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
  785. brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
  786. brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
  787. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
  788. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
  789. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
  790. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
  791. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
  792. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
  793. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
  794. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
  795. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
  796. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
  797. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
  798. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
  799. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
  800. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
  801. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
  802. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
  803. brainscore_vision/models/r50_tvpt/__init__.py +9 -0
  804. brainscore_vision/models/r50_tvpt/model.py +47 -0
  805. brainscore_vision/models/r50_tvpt/setup.py +24 -0
  806. brainscore_vision/models/r50_tvpt/test.py +1 -0
  807. brainscore_vision/models/regnet/__init__.py +14 -0
  808. brainscore_vision/models/regnet/model.py +17 -0
  809. brainscore_vision/models/regnet/requirements.txt +2 -0
  810. brainscore_vision/models/regnet/test.py +17 -0
  811. brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
  812. brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
  813. brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
  814. brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
  815. brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
  816. brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
  817. brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
  818. brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
  819. brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
  820. brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
  821. brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
  822. brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
  823. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
  824. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
  825. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
  826. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
  827. brainscore_vision/models/resnet50_julios/__init__.py +5 -0
  828. brainscore_vision/models/resnet50_julios/model.py +54 -0
  829. brainscore_vision/models/resnet50_julios/setup.py +24 -0
  830. brainscore_vision/models/resnet50_julios/test.py +1 -0
  831. brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
  832. brainscore_vision/models/resnet50_tutorial/model.py +34 -0
  833. brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
  834. brainscore_vision/models/resnet50_tutorial/test.py +8 -0
  835. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
  836. brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
  837. brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
  838. brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
  839. brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
  840. brainscore_vision/models/resnet_50_robust/model.py +55 -0
  841. brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
  842. brainscore_vision/models/resnet_50_robust/test.py +8 -0
  843. brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
  844. brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
  845. brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
  846. brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
  847. brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
  848. brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
  849. brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
  850. brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
  851. brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
  852. brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
  853. brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
  854. brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
  855. brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
  856. brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
  857. brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
  858. brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
  859. brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
  860. brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
  861. brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
  862. brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
  863. brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
  864. brainscore_vision/models/temporal_model_GDT/model.py +72 -0
  865. brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
  866. brainscore_vision/models/temporal_model_GDT/test.py +17 -0
  867. brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
  868. brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
  869. brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
  870. brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
  871. brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
  872. brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
  873. brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
  874. brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
  875. brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
  876. brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
  877. brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
  878. brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
  879. brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
  880. brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
  881. brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
  882. brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
  883. brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
  884. brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
  885. brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
  886. brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
  887. brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
  888. brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
  889. brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
  890. brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
  891. brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
  892. brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
  893. brainscore_vision/models/temporal_model_openstl/model.py +206 -0
  894. brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
  895. brainscore_vision/models/temporal_model_openstl/test.py +19 -0
  896. brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
  897. brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
  898. brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
  899. brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
  900. brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
  901. brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
  902. brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
  903. brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
  904. brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
  905. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
  906. brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
  907. brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
  908. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
  909. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
  910. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
  911. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
  912. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
  913. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
  914. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
  915. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
  916. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
  917. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
  918. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
  919. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
  920. brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
  921. brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
  922. brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
  923. brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
  924. brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
  925. brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
  926. brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
  927. brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
  928. brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
  929. brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
  930. brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
  931. brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
  932. brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
  933. brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
  934. brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
  935. brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
  936. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
  937. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
  938. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
  939. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
  940. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
  941. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
  942. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
  943. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
  944. brainscore_vision/submission/__init__.py +0 -0
  945. brainscore_vision/submission/actions_helpers.py +153 -0
  946. brainscore_vision/submission/config.py +7 -0
  947. brainscore_vision/submission/endpoints.py +58 -0
  948. brainscore_vision/utils/__init__.py +91 -0
  949. brainscore_vision-2.1.dist-info/LICENSE +11 -0
  950. brainscore_vision-2.1.dist-info/METADATA +152 -0
  951. brainscore_vision-2.1.dist-info/RECORD +1009 -0
  952. brainscore_vision-2.1.dist-info/WHEEL +5 -0
  953. brainscore_vision-2.1.dist-info/top_level.txt +4 -0
  954. docs/Makefile +20 -0
  955. docs/source/conf.py +78 -0
  956. docs/source/index.rst +21 -0
  957. docs/source/modules/api_reference.rst +10 -0
  958. docs/source/modules/benchmarks.rst +8 -0
  959. docs/source/modules/brainscore_submission.png +0 -0
  960. docs/source/modules/developer_clarifications.rst +36 -0
  961. docs/source/modules/metrics.rst +8 -0
  962. docs/source/modules/model_interface.rst +8 -0
  963. docs/source/modules/submission.rst +112 -0
  964. docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
  965. docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
  966. docs/source/modules/tutorial_screenshots/init_py.png +0 -0
  967. docs/source/modules/tutorial_screenshots/mms.png +0 -0
  968. docs/source/modules/tutorial_screenshots/setup.png +0 -0
  969. docs/source/modules/tutorial_screenshots/sms.png +0 -0
  970. docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
  971. docs/source/modules/utils.rst +22 -0
  972. migrations/2020-12-20_pkl_to_nc.py +90 -0
  973. tests/__init__.py +6 -0
  974. tests/conftest.py +26 -0
  975. tests/test_benchmark_helpers/__init__.py +0 -0
  976. tests/test_benchmark_helpers/test_screen.py +75 -0
  977. tests/test_examples.py +41 -0
  978. tests/test_integration.py +43 -0
  979. tests/test_metric_helpers/__init__.py +0 -0
  980. tests/test_metric_helpers/test_temporal.py +80 -0
  981. tests/test_metric_helpers/test_transformations.py +171 -0
  982. tests/test_metric_helpers/test_xarray_utils.py +85 -0
  983. tests/test_model_helpers/__init__.py +6 -0
  984. tests/test_model_helpers/activations/__init__.py +0 -0
  985. tests/test_model_helpers/activations/test___init__.py +404 -0
  986. tests/test_model_helpers/brain_transformation/__init__.py +0 -0
  987. tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
  988. tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
  989. tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
  990. tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
  991. tests/test_model_helpers/temporal/__init__.py +0 -0
  992. tests/test_model_helpers/temporal/activations/__init__.py +0 -0
  993. tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
  994. tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
  995. tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
  996. tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
  997. tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
  998. tests/test_model_helpers/temporal/test_utils.py +61 -0
  999. tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
  1000. tests/test_model_helpers/test_imports.py +10 -0
  1001. tests/test_model_helpers/test_s3.py +38 -0
  1002. tests/test_models.py +15 -0
  1003. tests/test_stimuli.py +0 -0
  1004. tests/test_submission/__init__.py +0 -0
  1005. tests/test_submission/mock_config.py +3 -0
  1006. tests/test_submission/test_actions_helpers.py +67 -0
  1007. tests/test_submission/test_db.py +54 -0
  1008. tests/test_submission/test_endpoints.py +125 -0
  1009. tests/test_utils.py +21 -0
@@ -0,0 +1,849 @@
1
+ import itertools
2
+ import math
3
+ import pickle
4
+ import importlib
5
+
6
+ import brainscore
7
+
8
+ import xarray as xr
9
+ from PIL import Image
10
+ from pixelmatch.contrib.PIL import pixelmatch
11
+
12
+ from brainio.assemblies import NeuroidAssembly
13
+ from pathlib import Path
14
+
15
+ import copy
16
+ import numpy as np
17
+ import pandas as pd
18
+ from brainscore.benchmarks.screen import place_on_screen
19
+ from candidate_models.base_models import cornet
20
+
21
+ from model_tools.activations import PytorchWrapper
22
+
23
+ from sklearn import preprocessing
24
+ from sklearn.model_selection import train_test_split
25
+ from model_tools.brain_transformation import ModelCommitment
26
+ from tqdm import tqdm
27
+
28
+
29
# Experiment configuration constants.
SPLIT_NUMBER = 100  # number of random train/test splits performance is averaged over
MAX_NUM_NEURONS = 71  # upper bound on the number of model "neurons" sampled per split
HVM_TEST_IMAGES_NUM = 30  # held-out HVM test images per split
OOD_TEST_IMAGES_NUM = 30  # held-out out-of-domain test images per split

# Object categories present in the domain-transfer stimulus set.
CATEGORIES = ['apple', 'bear', 'bird', 'car', 'chair', 'dog', 'elephant', 'face', 'plane', 'zebra']
# HVM-like (silhouette-derived) styles that share backgrounds with the HVM images.
SILHOUETTE_DOMAINS = ['convex_hull', 'outline', 'skeleton', 'silhouette']

# Candidate model architectures evaluated by this script.
List_all_models = ['resnext101_32x16d_wsl', 'resnext101_32x32d_wsl', 'resnext101_32x48d_wsl', 'resnext101_32x8d_wsl']
38
+
39
+
40
+ ############################################################
41
+ # Loading functions: brain model specific
42
+ ############################################################
43
+
44
def get_brainmodel(identifier, penultimate_layer: bool = False):
    '''
    Load brain model from the correct source.

    Arguments:
        identifier: Architecture name of brain model
        penultimate_layer: boolean (True: take the penultimate layer, False: take the IT layer
            of the model). Previously annotated with the literal ``False`` and required;
            now a proper ``bool`` with a backward-compatible default.
    Returns:
        brain_model: brain model scaffold
    '''
    # Models submitted through separate plugin packages: resolve the package name,
    # then build a ModelCommitment from its get_model/get_layers entry points.
    identifier_package_mapping = {
        'resnet50-barlow': 'resnet_selfsup_submission',
        'custom_model_cv_18_dagger_408': 'crossvit_18_dagger_408_finetuned',
        'efficientnet-b6': 'efficientnet_models',
        'ViT_L_32_imagenet1k': 'ViT',
        'ViT_L_16_imagenet1k': 'ViT',
        'r3m_resnet34': 'r3m_main',
        'r3m_resnet50': 'r3m_main',
    }
    if identifier in identifier_package_mapping:
        packagename = identifier_package_mapping[identifier]
        module = importlib.import_module(f"{packagename}.models.base_models")
        get_submission_model = getattr(module, "get_model")
        get_submission_layers = getattr(module, "get_layers")
        basemodel = get_submission_model(identifier)
        layers = get_submission_layers(identifier)
        return ModelCommitment(identifier=identifier, activations_model=basemodel, layers=layers)

    if identifier == 'CORnet-S' and penultimate_layer:
        # Only rewrap when choosing the penultimate layer, *not* when choosing the IT layer:
        # expose 'decoder.avgpool' as the recordable layer.
        basemodel = cornet(identifier)
        basemodel = PytorchWrapper(model=basemodel._model, preprocessing=basemodel._extractor.preprocess)
        return ModelCommitment(identifier=identifier, activations_model=basemodel, layers=['decoder.avgpool'])

    # NOTE(review): brain_translated_pool is not imported in this file — presumably
    # `from candidate_models import brain_translated_pool`; confirm before running.
    brain_model = brain_translated_pool[identifier]
    return brain_model
74
+
75
+
76
def retrieve_activations_from_brainmodel(brain_model, image_source: str, penultimate_layer) -> NeuroidAssembly:
    '''
    Returns an xarray DataArray with the two dimensions presentation and neuroid as well as the
    additional metadata layer on the neuroid.

    Arguments:
        brain_model: brain model (ModelCommitment-like, supports start_recording/look_at)
        image_source: identifier of the stimulus set used for model activation
        penultimate_layer: None if the IT layer is chosen, otherwise the desired penultimate
            layer name

    Returns:
        activations (NeuroidAssembly xarray): brain model activations for the image source/layer
    '''
    # Get stimulus set for images
    stimulus_set = brainscore.get_stimulus_set(image_source)
    # Reshape images for the brain model (source stimuli cover 8 visual degrees)
    stimset = place_on_screen(stimulus_set, brain_model.visual_degrees(), 8)
    if penultimate_layer is not None:
        # Redefine the "recording area": map IT onto the requested layer instead
        brain_model.layer_model.region_layer_map['IT'] = penultimate_layer
    brain_model.start_recording('IT', time_bins=[(70, 170)])
    # Activate the brain model with the given image dataset
    activations = brain_model.look_at(stimset)
    # Reduce to a 2d array, dropping the singleton time_bin dimension
    activations = activations.squeeze()
    # Reshape the dimensions
    activations = activations.transpose('presentation', 'neuroid')

    if image_source == 'dicarlo.domain_transfer':
        # Delete unwanted sources
        for unwanted_source in ('GeirhosOOD', 'CueConflict', 'ObjectNet'):
            activations = activations.where(activations.stimulus_source != unwanted_source, drop=True)

    return activations
110
+
111
def get_brain_model_activation(brain_model_name, image_source, penultimate_layer_boolean=False):
    '''
    Activates a brain model with the respective image source on the respective layer.

    Arguments:
        brain_model_name: Architecture name of brain model
        image_source: images for model activation
        penultimate_layer_boolean: True to record from the penultimate layer,
            False to record from the model's IT layer
    Returns:
        brain_model_activations (NeuroidAssembly xarray): activations for the desired
        image source and layer
    '''
    # Build the model scaffold, then record from the requested layer:
    # the model's last layer when the penultimate layer is asked for, IT otherwise.
    model = get_brainmodel(brain_model_name, penultimate_layer_boolean)
    chosen_layer = model.layers[-1] if penultimate_layer_boolean else None
    return retrieve_activations_from_brainmodel(model, image_source, penultimate_layer=chosen_layer)
131
+
132
def loading_brain_model_activation(brain_model_name, image_source, penultimate_layer):
    '''
    Loads brain model activations and adds background ids to each of the HVM-like
    (Silhouette) images.

    Arguments:
        brain_model_name: Architecture name of brain model
        image_source: images for model activation
        penultimate_layer: boolean (True: penultimate layer, False: IT layer of the model)
    Returns:
        domain_transfer_data: activations with background_id coordinates attached
    '''
    activations = get_brain_model_activation(brain_model_name, image_source, penultimate_layer)
    # Split into HVM / HVM-like / rest so matching backgrounds can be identified
    hvm, silhouette_like, other = load_silhouette_data(data=activations)
    return create_background_ids(hvm, silhouette_like, other)
147
+
148
+
149
+ ############################################################
150
+ ############################################################
151
+ ############################################################
152
+
153
+
154
def get_brain_model_performance(brain_model_name: str, image_source: str, estimator, image_arry, penultimate_layer: bool, split_num):
    '''
    Computes and saves brain model performance for the full image and neuron range.
    Performance is averaged over split_num splits.

    Arguments:
        brain_model_name: Architecture name of brain model
        image_source: images for model activation
        estimator: classifier for the decoder
        image_arry: array with numbers of training images
        penultimate_layer: boolean (True: penultimate layer, False: IT layer of the model).
            Previously annotated with the literal ``False``; fixed to ``bool``.
        split_num: number of splits to average over

    Returns:
        None. Saves a pandas dataframe per split and the averaged performance over all splits.
        Split dataframe columns: #Neurons, #Images training, Accuracy test data
        Averaged dataframe columns: #Neurons, #Images training, Accuracy test data, Std test data
    '''
    brain_model_activations = loading_brain_model_activation(brain_model_name, image_source, penultimate_layer)
    # Calculate performance over all splits and store per-split and averaged results
    get_performance_splits_and_average(brain_model_activations=brain_model_activations, num_images_arry=image_arry, num_splits=split_num,
                                       estimator=estimator, brain_model_name=brain_model_name)
    # get_performance_splits_and_average_single_image(brain_model_activations=brain_model_activations, num_images=MAX_NUM_IMAGES, num_splits=SPLIT_NUMBER,
    #                                                 estimator=estimator, brain_model_name=brain_model_name, num_primate_it_neurons_scaling_factor_matching=NEURONS)
    print(f'{brain_model_name} brain model performance was saved')
178
+
179
+ #################################################
180
+ #################################################
181
+ #################################################
182
+ #################################################
183
+ # Functions overlapping with hvm_crossdomain
184
+ #################################################
185
+
186
def create_background_ids(hvm_data, rest_data, non_silhouette_data):
    '''
    Domain-transfer data is loaded and hvm-like images get their respective background ids
    assigned based on their matching hvm images.

    Arguments:
        hvm_data: HVM data
        rest_data: HVM-like (Silhouette) data
        non_silhouette_data: non-HVM-like data (non Silhouette)

    Returns:
        domain_transfer_data: full data with an additional coordinate background_id which
        indicates the matching background between hvm and hvm-like images
    '''
    # Add background_ids to hvm images.
    # NOTE(review): assumes exactly 120 HVM presentations (ids 1..120) — confirm upstream.
    hvm_data = hvm_data.assign_coords(background_id=('presentation', np.arange(1, 121)))
    # Non-silhouette images have no matching HVM background; 0 marks "no match".
    # (np.zeros yields float ids here vs int ids for HVM — presumably harmless; verify consumers.)
    non_silhouette_data = non_silhouette_data.assign_coords(
        background_id=('presentation', np.zeros(len(non_silhouette_data))))

    # Loop through each category to find the respective images
    for category in tqdm(CATEGORIES, desc='looping categories'):
        hvm_category = hvm_data[hvm_data['object_label'] == category]
        oods_category = rest_data[rest_data['object_label'] == category]
        # Find the matching backgrounds in hvm images
        background_ids = find_matching_background(oods_category, hvm_category)
        # Store the background ids in the NeuronAssembly
        oods_category = oods_category.assign_coords(background_id=('presentation', background_ids))
        category_data = xr.concat((hvm_category, oods_category), dim='presentation')
        # Concatenate all categories together; 'apple' is the first entry of CATEGORIES,
        # so it seeds the accumulator
        if category == 'apple':
            full_data = copy.copy(category_data)
        else:
            full_data = xr.concat((full_data, category_data), dim='presentation')
    domain_transfer_data = xr.concat((full_data, non_silhouette_data), dim='presentation')

    return domain_transfer_data
220
+
221
+
222
def find_matching_background(oods_category, hvm_category):
    '''
    hvm and hvm-like images share the same background. To identify similar backgrounds, images
    are compared pixel-wise to each other. Images that share the most overlap are then labeled
    with the same background id as the respective hvm image.

    Arguments:
        oods_category (NeuronRecordingAssembly): all images from one single hvm-like domain
            without background id
        hvm_category (NeuronRecordingAssembly): hvm images with background id

    Returns:
        background_ids: list of matching background ids for the single hvm-like domain
    '''
    background_ids = []
    # Find the respective background id from hvm images for each OOD image
    #'https://brainio.dicarlo.s3.amazonaws.com/assy_dicarlo_Sanghavi2021_domain_transfer.nc,8c6a02348ca892d75a83a6ffa0551e098e1edae0,dicarlo.domain_transferdicarlo.Marques2020_size,stimulus_set,StimulusSet,S3,https://brainio.dicarlo.s3.amazonaws.com/image_dicarlo_Marques2020_size.csv,0fd0aeea8fa6ff2b30ee9a6a684d4600590d631f'

    # Local image directory holding the domain-transfer stimuli next to this file
    data_path = Path(__file__).parent / 'Sanghavi-domain_transfer-data/image_dicarlo_domain_transfer'
    oods_category_image_file_path = oods_category.filename

    for ood_image in tqdm(oods_category_image_file_path, desc='looping images'):
        image_filename = ood_image.item()
        image_path = str(data_path / image_filename)
        image_ood = Image.open(image_path)

        hvm_image_file_path = hvm_category.filename
        # Compare against every HVM image until one is close enough
        for hvm_image, hvm_background_id in zip(hvm_image_file_path, hvm_category.background_id):
            image_filename = hvm_image.item()  # note: shadows the OOD filename above
            image_path = str(data_path / image_filename)
            image_hvm = Image.open(image_path)
            # pixelmatch returns the number of mismatched pixels between the two images;
            # <= 20000 mismatched pixels counts as "same background"
            mismatch = pixelmatch(image_hvm, image_ood)
            if mismatch <= 20000:
                background_ids.append(hvm_background_id.item())
                break
            else:
                pass
    # NOTE(review): if an OOD image matches no HVM image, nothing is appended — the returned
    # list would then be shorter than oods_category and misalign downstream assign_coords;
    # confirm every OOD image has a matching HVM background.
    return background_ids
258
+
259
+
260
def load_silhouette_data(data):
    '''
    Separates domain-transfer data into hvm, hvm-like (silhouette) and rest (non-silhouette)
    data. This separation is needed to give each hvm-like image the same background number as
    its respective hvm version (images share the same background).

    Arguments:
        data: full data that is going to be split into hvm, hvm-like (silhouette) and rest
            (non-silhouette) data

    Returns:
        hvm_data (NeuronRecordingAssembly): hvm data
        rest_data (NeuronRecordingAssembly): hvm-like data
        non_silhouette_style_data (NeuronRecordingAssembly): rest data
    '''
    # The source label lives under 'identifier' in some assemblies and under
    # 'stimulus_source' in others; narrow the previously-bare except to the two
    # exceptions a missing column/coordinate actually raises.
    try:
        silhouette_style_data = data[data['identifier'] == 'Silhouette']
        non_silhouette_style_data = data[data['identifier'] != 'Silhouette']
    except (KeyError, AttributeError):
        silhouette_style_data = data[data['stimulus_source'] == 'Silhouette']
        non_silhouette_style_data = data[data['stimulus_source'] != 'Silhouette']
    # Within the Silhouette source, style 'original' is the HVM image itself
    hvm_data = silhouette_style_data[silhouette_style_data['object_style'] == 'original']
    rest_data = silhouette_style_data[silhouette_style_data['object_style'] != 'original']

    return hvm_data, rest_data, non_silhouette_style_data
283
+
284
def get_single_domain_data(data, image_source_in_domain, object_style_in_domain):
    '''
    Filters the data for a single domain.

    Arguments:
        data: NeuronRecordingAssembly xarray
        image_source_in_domain (str): image source of the wanted domain
        object_style_in_domain (str): image style of the wanted domain

    Returns:
        domain_data: data for the single domain
    '''
    # 'Art' and 'Silhouette' sources contain several styles, so both source and style must
    # match; every other source is uniquely identified by the source alone. The source label
    # lives under 'identifier' in some assemblies and 'stimulus_source' in others; a missing
    # attribute raises AttributeError (previously caught by a bare except).
    if image_source_in_domain in ['Art', 'Silhouette']:
        try:
            domain_data = data.where((data.identifier == image_source_in_domain) & (data.object_style == object_style_in_domain), drop=True)
        except AttributeError:
            domain_data = data.where((data.stimulus_source == image_source_in_domain) & (data.object_style == object_style_in_domain), drop=True)

    else:
        try:
            domain_data = data.where(data.identifier == image_source_in_domain, drop=True)
        except AttributeError:
            domain_data = data.where(data.stimulus_source == image_source_in_domain, drop=True)

    return domain_data
310
+
311
def get_crossdomain_data_dictionary(domain_transfer_data):
    '''
    Create a dictionary with each crossdomain name as key and its data as value.

    Arguments:
        domain_transfer_data (NeuronRecordingAssembly): complete dataset

    Returns:
        dict mapping each crossdomain name ('hvm', 'cartoon', ...) to its data
    '''
    # (object_style, image_source) pairs that define every crossdomain
    style_source_pairs = [
        ('original', 'Silhouette'), ('cartoon', 'Art'), ('line_drawing', 'Art'),
        ('mosaic', 'Art'), ('painting', 'Art'), ('sketch', 'Art'),
        ('convex_hull', 'Silhouette'), ('outline', 'Silhouette'),
        ('skeleton', 'Silhouette'), ('silhouette', 'Silhouette'),
        ('cococolor', 'COCOColor'), ('cocogray', 'COCOGray'), ('tdw', 'TDW'),
    ]
    crossdomain_data_dict = {}
    for object_style, image_source in style_source_pairs:
        domain_data = get_single_domain_data(data=domain_transfer_data,
                                             image_source_in_domain=image_source,
                                             object_style_in_domain=object_style)
        # The 'original' Silhouette style is stored under the key 'hvm'
        key = 'hvm' if object_style == 'original' else object_style
        crossdomain_data_dict[key] = domain_data

    return crossdomain_data_dict
333
+
334
+
335
def get_crossdomain_dataframes(single_neuron_image=False):
    '''
    Creates a dictionary with one empty result dataframe per crossdomain.

    Arguments:
        single_neuron_image: boolean (True: add an additional 'Split number' column,
            False: no additional column)

    Returns:
        dict mapping each crossdomain name to an empty dataframe with the columns
        #Neurons, #Images training, Accuracy test data (plus Split number if requested)
    '''
    columns = ['#Neurons', '#Images training', 'Accuracy test data']
    if single_neuron_image:
        columns = columns + ['Split number']

    crossdomains = ['hvm', 'cartoon', 'line_drawing', 'mosaic', 'painting', 'sketch',
                    'convex_hull', 'outline', 'skeleton', 'silhouette',
                    'cococolor', 'cocogray', 'tdw']
    # Build an independent empty dataframe per crossdomain
    return {crossdomain: pd.DataFrame(columns=columns) for crossdomain in crossdomains}
356
+
357
def split_training_test_images(crossdomain_data_dictionary):
    '''
    Splits data into a training data pool and test images. Makes sure that the background ids
    of testing hvm and training non-hvm images are not identical.

    Arguments:
        crossdomain_data_dictionary (dict): dictionary mapping each crossdomain name to its
            NeuronRecordingAssembly

    Returns:
        crossdomain_test_data_dictionary (dict): dictionary mapping each crossdomain name to
            its test images (NeuronRecordingAssembly)
        training_images (NeuronRecordingAssembly): training image pool; contains only HVM images
    '''
    # Create crossdomain testing images dictionary
    crossdomain_test_data_dictionary = {}
    # Loop through each crossdomain and seed a random subset of images for testing.
    # NOTE(review): relies on 'hvm' being the first key (dict insertion order) so that
    # background_ids_silhouette_img and training_images are bound before the other branches run.
    for crossdomain in crossdomain_data_dictionary.keys():
        crossdomain_data = crossdomain_data_dictionary[crossdomain]
        if crossdomain == 'hvm':
            # Held-out HVM test images; the remainder becomes the training pool
            test_images, training_images = reduce_data_num_images(data_complete=crossdomain_data, number_images=HVM_TEST_IMAGES_NUM)
            background_ids_silhouette_img = test_images.background_id.values

        elif crossdomain in SILHOUETTE_DOMAINS:
            # Silhouette-style domains share backgrounds with HVM: test on exactly the images
            # whose background id appears in the HVM test set, keeping them out of training
            test_indices = np.where(np.in1d(crossdomain_data.background_id, background_ids_silhouette_img))
            test_images = crossdomain_data[test_indices]
        else:
            # Unrelated domains: draw an independent stratified test subset
            test_images, _ = reduce_data_num_images(data_complete=crossdomain_data, number_images=OOD_TEST_IMAGES_NUM)
        crossdomain_test_data_dictionary[crossdomain] = test_images

    return crossdomain_test_data_dictionary, training_images
385
+
386
def reduce_data_num_images(data_complete, number_images):
    '''
    Draws a randomly seeded subset of data while making sure that each object category is
    represented equally.

    Arguments:
        data_complete (NeuronRecordingAssembly): complete dataset
        number_images (int): number of images for the training dataset

    Returns:
        stratified_training_data (NeuronRecordingAssembly): training data with an equal number
            of each object category
        rest_data (NeuronRecordingAssembly): remaining data (complete data minus training
            data), or None when the whole dataset was requested
    '''
    if number_images == len(data_complete):
        # Nothing to hold out: the whole dataset is the selected subset
        return data_complete, None
    # The category label lives under 'object_label' in some assemblies and 'category_name'
    # in others; a missing attribute raises AttributeError (previously a bare except that
    # also hid failures from train_test_split itself).
    try:
        stratify_labels = data_complete.object_label
    except AttributeError:
        stratify_labels = data_complete.category_name
    stratified_training_data, rest_data = train_test_split(
        data_complete, train_size=number_images, stratify=stratify_labels)

    return stratified_training_data, rest_data
408
+
409
+
410
def get_final_traning_data(complete_training_data, num_images_training, num_neurons):
    '''
    Draws the final training images and neurons for one split.

    Arguments:
        complete_training_data: complete training data pool for one split
        num_images_training: desired number of training images
        num_neurons: desired number of training neurons

    Returns:
        final_traning_data: final training data for this split
        neuron_indices: indices of the sampled training neurons (used to align the neurons
            in the testing data)
    '''
    # First subsample the images, then subsample the neurons from those images
    sampled_images, _ = reduce_data_num_images(data_complete=complete_training_data,
                                               number_images=num_images_training)
    return reduce_data_num_neurons(data=sampled_images, num_neurons=num_neurons)
428
+
429
def reduce_data_num_neurons(data, num_neurons):
    '''
    Reduces the number of neurons in the data by randomly drawing neuron indices from the
    complete dataset.

    Arguments:
        data (NeuronRecordingAssembly): complete dataset
        num_neurons (int): number of neurons the data should be reduced to

    Returns:
        reduced_neurons_num_data: data restricted to the sampled neurons
        random_indices_neurons: indices of the neurons kept in reduced_neurons_num_data
    '''
    total_neurons = len(data.neuroid)
    # Sample without replacement so every neuron appears at most once
    random_indices_neurons = np.random.choice(total_neurons, num_neurons, replace=False)
    # Keep all presentations, only the sampled neuron columns
    reduced_neurons_num_data = data[:, random_indices_neurons]
    return reduced_neurons_num_data, random_indices_neurons
447
+
448
def get_decoder(data, estimator):
    '''
    Trains a decoder.

    Arguments:
        data (NeuronRecordingAssembly): training data
        estimator (sklearn classifier): estimator template, e.g. RidgeClassifierCV,
            ElasticNetCV etc.

    Returns:
        clf: trained decoder
    '''
    # Input features
    X = data.data
    # The category label lives under 'object_label' in some assemblies and 'category_name'
    # in others; a missing attribute raises AttributeError (previously a bare except).
    try:
        y = data.object_label.data
    except AttributeError:
        y = data.category_name.data

    # Copy so the caller's estimator template itself is never fitted
    clf = copy.copy(estimator)

    try:
        clf.fit(X, y)
    except ValueError:
        # Estimators that cannot fit string labels directly (e.g. regression-style CV
        # estimators) raise ValueError: one-hot encode the labels and retry.
        binary_label = preprocessing.LabelBinarizer()
        y = binary_label.fit_transform(y)
        clf.fit(X, y)

    return clf
479
+
480
def get_final_testing_data(crossdomain_test_images_dictionary, neuron_indices):
    '''
    Reduces every crossdomain's testing data to the correct (number of) neurons.

    Arguments:
        crossdomain_test_images_dictionary (dict): keys: domain names, values: test data
        neuron_indices: indices of the desired neurons (the same ones used for training)

    Returns:
        crossdomain_test_images_dictionary_final (dict): keys: domain names, values: test data
        restricted to the selected neurons
    '''
    # Select the same neuron subset in every domain so train and test neurons align
    return {domain: test_images[:, neuron_indices]
            for domain, test_images in crossdomain_test_images_dictionary.items()}
495
+
496
def add_accuracies_to_split_df(final_test_data_dictionary, decoder, split_dataframe, num_neurons, num_training_images):
    '''
    Fill split dataframe with decoder performance and the number of training images and neurons used in this split.

    Arguments:
        final_test_data_dictionary (dict with NeuronRecordingAssembly): key: domain names, values: final test data with the correct neurons,
        decoder: trained decoder,
        split_dataframe (dict): keys: domain names, values: dataframe with columns: #Neurons, #Images training, Accuracy test data,
        num_neurons: number of training neurons,
        num_training_images: number of training images

    Returns:
        split_dataframe (dict): keys: domain names, values: dataframe with columns: #Neurons, #Images training, Accuracy test data
    '''
    # Get and store the test accuracy for each crossdomain
    for crossdomain in final_test_data_dictionary.keys():
        test_accuracy = get_classifier_score_2AFC(classifier=decoder, data=final_test_data_dictionary[crossdomain])
        # DataFrame.append was removed in pandas 2.0 -- build a one-row frame and concat instead
        new_row = pd.DataFrame([{
            '#Neurons': num_neurons,
            '#Images training': num_training_images,
            'Accuracy test data': test_accuracy
        }])
        split_dataframe[crossdomain] = pd.concat([split_dataframe[crossdomain], new_row], ignore_index=True)

    return split_dataframe
def get_classifier_score_2AFC(classifier, data):
    '''
    Calculates the 2AFC (two-alternative forced choice) score.

    For each test sample, the decision value of the true category is compared
    pairwise against every other category; the per-sample score is the fraction
    of those comparisons the true category wins.

    Arguments:
        classifier: pre-trained classifier exposing `decision_function`
        data (NeuronRecordingAssembly): test data; labels are read from the
            `object_label` coordinate, falling back to `category_name`

    Returns:
        mean 2AFC score over all test samples (float in [0, 1])
    '''
    # Get input & output data
    X = data.data
    # Some assemblies label stimuli with `object_label`, others with `category_name`
    try:
        y = data.object_label.data
    except AttributeError:
        y = data.category_name.data

    categories = np.unique(y)
    number_of_categories = len(categories)
    # assumes decision_function returns one column per category, in the sorted
    # order of `categories` -- TODO confirm against the estimator used
    predict_probs = classifier.decision_function(X)
    scores = np.zeros(len(y))
    indices_column = np.arange(len(categories))

    for row in range(len(y)):
        true_category_index = np.where(categories == y[row])
        # Columns of all competing (non-true) categories
        competitor_columns = np.delete(indices_column, true_category_index)
        wins = 0  # renamed from `sum` to avoid shadowing the builtin
        for competitor in competitor_columns:
            if predict_probs[row, true_category_index] > predict_probs[row, competitor]:
                wins = wins + 1
        scores[row] = wins / (number_of_categories - 1)

    avg_score = np.mean(scores)
    return avg_score
+ #################################################
567
+ #################################################
568
+ #################################################
569
+ #################################################
570
+ #################################################
571
+ # Brain model specific functions
572
+ #################################################
573
+
574
+
575
def get_performance_splits_and_average(brain_model_activations, num_images_arry, num_splits, estimator, brain_model_name):
    '''
    Runs the full #neurons x #images decoding sweep over `num_splits` train/test splits.

    For every split a fresh train/test partition is drawn; for every
    (#neurons, #images) combination a decoder is trained and its 2AFC test
    accuracy stored. Per-split dataframes and a split-averaged dataframe are
    written to CSV via the save helpers; nothing is returned.

    Arguments:
        brain_model_activations: (NeuronRecordingAssembly) activations with dims {presentation, neuroid}
        num_images_arry: iterable of training-image counts to sweep
        num_splits: (int) number of train/test splits to run
        estimator: (sklearn classifier) estimator template handed to get_decoder
        brain_model_name: (str) used in the output CSV filenames
    '''

    # Check dimensionality of NeuroAssembly
    assert set(brain_model_activations.dims) == {'presentation', 'neuroid'}
    # Fixed seed so the random neuron/image subsampling is reproducible across runs
    np.random.seed(42)
    # Load data: get a dictionary with all crossdomain data
    crossdomain_data_dict = get_crossdomain_data_dictionary(domain_transfer_data=brain_model_activations)
    # Load (empty) result dataframes for each crossdomain
    crossdomain_dataframes = get_crossdomain_dataframes()
    # Neuron counts to sweep: powers of two up to this model's unit count
    num_neurons_arry = create_power_of_two_array_neurons(brain_model_activations=brain_model_activations) #TODO: Undo the #
    # num_neurons_arry = np.asarray((1, 3, 5, 10, 20, 30, 40, 50, 71))

    # Loop through the splits
    for split in np.arange(num_splits):
        # Fresh per-split result dataframes (saved separately from the accumulated ones)
        split_crossdomain_dataframes = get_crossdomain_dataframes()
        # New test images per split; kept fixed across all #images x #neurons rounds within the split
        crossdomain_test_images_dict, complete_training_data = split_training_test_images(crossdomain_data_dictionary=crossdomain_data_dict) # TODO: test if the background ids are identical for all Silhouette images in test data

        # Loop through every (#neurons, #images) combination
        for num_neurons, num_images_train in tqdm(itertools.product(num_neurons_arry, num_images_arry), desc='Neuron & image round'):
            # Sample final training data with the right number of neurons & images
            final_training_data, neuron_indices = get_final_traning_data(complete_training_data=complete_training_data, num_images_training=num_images_train,
                                                                         num_neurons=num_neurons)
            # Train the decoder
            split_decoder = get_decoder(data=final_training_data, estimator=estimator)

            # Restrict the held-out test data to the same sampled neurons
            final_test_data_dict = get_final_testing_data(crossdomain_test_images_dictionary=crossdomain_test_images_dict, neuron_indices=neuron_indices)
            # Get the test accuracy and store it in the split dataframe
            split_crossdomain_dataframes = add_accuracies_to_split_df(final_test_data_dictionary=final_test_data_dict, decoder=split_decoder,
                                                                      split_dataframe=split_crossdomain_dataframes, num_neurons=num_neurons, num_training_images=num_images_train)
        #TODO: correct number of training test images?
        crossdomain_dataframes = save_split_dataframes(split_crossdomain_dataframes=split_crossdomain_dataframes, crossdomain_dataframes=crossdomain_dataframes, split=split,
                                                       brain_model_name=brain_model_name)

    save_split_averaged_dataframes(crossdomain_dataframes=crossdomain_dataframes, neurons_array=num_neurons_arry, images_array=num_images_arry, brain_model_name=brain_model_name)
+ ########################
618
+ # Other functions that are brain model specific
619
+ ########################
620
def create_power_of_two_array_neurons(brain_model_activations):
    '''
    Return the powers of two up to the model's unit count, appending the exact
    unit count itself when it is not already a power of two.
    '''
    total_units = len(brain_model_activations.neuroid)
    # Largest exponent k with 2**k <= total_units
    largest_exponent = math.floor(np.log2(total_units))
    sizes = 2 ** np.arange(largest_exponent + 1)

    # Make sure the sweep also covers the full population
    if sizes[-1] != total_units:
        sizes = np.append(sizes, total_units)

    return sizes
+ ############################################################
634
+ # Saving functions
635
+ ############################################################
636
def save_dataframe(dataframe, csv_dataframe_name):
    '''Write `dataframe` as CSV into the results folder next to this module.'''
    target = Path(__file__).parent / 'dataframes_new_models' / csv_dataframe_name #TODO: undo folder name
    dataframe.to_csv(target)
    print(f"Saved to {target}")
def save_dictionary(dictionary, pkl_filename):
    '''Pickle `dictionary` to `pkl_filename` (the file is created or overwritten).'''
    with open(pkl_filename, 'wb') as handle:
        pickle.dump(dictionary, handle)
def open_pkl(filename_pkl):
    '''Load and return the pickled object stored at `filename_pkl`.'''
    with open(filename_pkl, 'rb') as handle:
        return pickle.load(handle)
+ ####################################
652
+ # Brain model specific saving functions
653
+ ####################################
654
+
655
def save_split_dataframes(split_crossdomain_dataframes, crossdomain_dataframes, split, brain_model_name, primate_it_num_neurons=None):
    '''
    Concats each split dataframe onto the running per-domain dataframe (one single
    dataframe with all performances over multiple splits) and saves the current split dataframe.

    Arguments:
        split_crossdomain_dataframes (dict): keys: domain name, values: dataframes with performance for each #Neurons x #Images combination
        crossdomain_dataframes (dict): keys: domain name, values: dataframes with performance accumulated over multiple splits
        split: number of split
        brain_model_name: name of brain model (used in the output filename)
        primate_it_num_neurons: optional neuron-match count. Accepted for compatibility with the
            single-image sweep, which passes it; currently unused in the output filename.
    Returns:
        saves split dataframe for each domain
        crossdomain_dataframes (dict): keys: domain name, values: accumulated dataframes including this split
    '''
    for crossdomain in split_crossdomain_dataframes.keys():
        crossdomain_dataframes[crossdomain] = pd.concat([crossdomain_dataframes[crossdomain], split_crossdomain_dataframes[crossdomain]], ignore_index=True)
        save_dataframe(dataframe=split_crossdomain_dataframes[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_split_{split}_it_layer.csv')

    return crossdomain_dataframes
def save_split_averaged_dataframes(crossdomain_dataframes, neurons_array, images_array, brain_model_name):
    '''
    Saves dataframe with performance averaged over multiple splits for each domain.

    Arguments:
        crossdomain_dataframes (dict): keys: domain name, values: dataframes with performance for each #Neurons x #Images combination stored over multiple splits
        neurons_array: array with the number of training neurons over all splits
        images_array: array with the number of training images over all splits
        brain_model_name: name of brain model
    Returns:
        saves averaged performance dataframe for each domain
    '''
    # Average over all splits. Get mean and standard deviation
    crossdomain_dataframes_averaged = get_crossdomain_dataframes()
    for crossdomain in crossdomain_dataframes.keys():
        # Rebuild the (#Neurons, #Images) grid in the same sorted order that
        # groupby produces below.
        crossdomain_dataframes_averaged[crossdomain]['#Neurons'] = np.repeat(neurons_array, len(images_array))
        crossdomain_dataframes_averaged[crossdomain]['#Images training'] = np.tile(images_array, len(neurons_array))
        # NOTE(review): relies on groupby's sorted group order matching the
        # repeat/tile order above, and on the grouped frame reducing to a
        # single value column -- TODO confirm
        crossdomain_dataframes_averaged[crossdomain]['Accuracy test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).mean().values
        crossdomain_dataframes_averaged[crossdomain]['Std test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).std().values
        # save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_num_neurons_primate.csv')
        # save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_penultimate_layer.csv')
        # save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_penultimate_layer_multiple_neurons.csv')
        save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_it_layer.csv')
+ ##############################################################
705
+ ##############################################################
706
+ ##############################################################
707
+ ##############################################################
708
+ ##############################################################
709
+ ##############################################################
710
+ ##############################################################
711
+ ##############################################################
712
+
713
def get_neuron_array_for_single_img(brain_model_name, brain_model_activation, primate_it_number_of_neurons):
    '''
    Build the neuron-count sweep for a single-image run: primate IT neuron counts
    scaled by this model's scaling factor, rounded up, and capped at (or extended
    to) the model's total number of units.
    '''
    model_num_units = get_scaling_factor_num_neurons(brain_model=brain_model_name, primate_it_num_neurons=primate_it_number_of_neurons)
    multiplier = model_num_units / primate_it_number_of_neurons
    primate_counts = np.asarray((10, 20, 30, 40, 50, 71))
    # Scale each primate neuron count and round up to whole units
    num_neurons_arry = np.array([math.ceil(count) for count in primate_counts * multiplier])
    max_units = len(brain_model_activation.neuroid)
    if num_neurons_arry[-1] > max_units:
        num_neurons_arry[-1] = max_units
    else:
        num_neurons_arry = np.append(num_neurons_arry, max_units)

    return num_neurons_arry
def get_performance_splits_and_average_single_image(brain_model_activations, num_images, num_splits, estimator, brain_model_name, num_primate_it_neurons_scaling_factor_matching):
    '''
    Runs the decoding sweep for a single, fixed number of training images over a
    brain-model-specific array of neuron counts. Per-split and split-averaged
    performance CSVs are written via the save helpers; nothing is returned.

    Arguments:
        brain_model_activations: (NeuronRecordingAssembly) activations with dims {presentation, neuroid}
        num_images: (int) fixed number of training images per split
        num_splits: (int) number of train/test splits to run
        estimator: (sklearn classifier) estimator template handed to get_decoder
        brain_model_name: (str) used in the output CSV filenames
        num_primate_it_neurons_scaling_factor_matching: passed through to the scaling-factor lookup
    '''
    # Check dimensionality of NeuroAssembly
    assert set(brain_model_activations.dims) == {'presentation', 'neuroid'}

    # Load data: get a dictionary with all crossdomain data
    crossdomain_data_dict = get_crossdomain_data_dictionary(brain_model_activations)
    # Load dataframes for each crossdomain
    crossdomain_dataframes = get_crossdomain_dataframes()
    # Get the correct neuron_array for each brain_model
    num_neurons_arry = get_neuron_array_for_single_img(brain_model_name, brain_model_activations, primate_it_number_of_neurons=num_primate_it_neurons_scaling_factor_matching)

    # Loop through splits
    for split in np.arange(num_splits):
        # Create in each split a new dataframe and save this one
        split_crossdomain_dataframes = get_crossdomain_dataframes()
        # New test images per split; kept fixed across all neuron rounds within the split
        crossdomain_test_images_dict, complete_training_data = split_training_test_images(crossdomain_data_dictionary=crossdomain_data_dict)

        # Loop through the number of neurons
        for num_neurons in tqdm(num_neurons_arry, desc='Neurons'):
            # Round the number of units up
            num_neurons = math.ceil(num_neurons)
            # Sample final training data with the right number of neurons & images
            final_training_data, neuron_indices = get_final_traning_data(complete_training_data=complete_training_data, num_images_training=num_images,
                                                                         num_neurons=num_neurons)
            # BUGFIX: get_decoder returns a single fitted estimator, not a tuple,
            # so `split_decoder, _ = ...` raised a TypeError here.
            split_decoder = get_decoder(data=final_training_data, estimator=estimator)
            # Get the final testing data with the correct number of neurons
            final_test_data_dict = get_final_testing_data(crossdomain_test_images_dictionary=crossdomain_test_images_dict, neuron_indices=neuron_indices)
            # Get the test accuracy and store it in the split dataframe
            split_crossdomain_dataframes = add_accuracies_to_split_df(final_test_data_dictionary=final_test_data_dict, decoder=split_decoder,
                                                                      split_dataframe=split_crossdomain_dataframes, num_neurons=num_neurons, num_training_images=num_images)

        # BUGFIX: save_split_dataframes does not take `primate_it_num_neurons`;
        # passing it raised a TypeError.
        crossdomain_dataframes = save_split_dataframes(split_crossdomain_dataframes=split_crossdomain_dataframes, crossdomain_dataframes=crossdomain_dataframes, split=split,
                                                       brain_model_name=brain_model_name)

    save_split_averaged_dataframes_single_image(crossdomain_dataframes=crossdomain_dataframes, neurons_array=num_neurons_arry, image_num=num_images, brain_model_name=brain_model_name, primate_it_num_neurons=num_primate_it_neurons_scaling_factor_matching)
def get_scaling_factor_num_neurons(brain_model, primate_it_num_neurons):
    '''
    Look up the number of units for `brain_model` from the pre-computed scaling-factor pickle.

    Arguments:
        brain_model: (str) key into the pickled {model name: num units} dictionary
        primate_it_num_neurons: (int or None) when given, selects the neuron-match variant of the pickle

    Returns:
        number of units recorded for this brain model
    '''
    # `is None` (identity check) instead of `== None`, which can misbehave for
    # objects overriding __eq__ (e.g. numpy arrays).
    if primate_it_num_neurons is None:
        neuron_dict = open_pkl(filename_pkl='Deep_nets_crossdomain_performance_scaling_factors_penultimate_layer.pkl')
    else:
        neuron_dict = open_pkl(filename_pkl=f'Deep_nets_crossdomain_performance_scaling_factors_penultimate_layer_{primate_it_num_neurons}_neuron_match.pkl')
    brain_model_num_neurons = neuron_dict[brain_model]
    return brain_model_num_neurons
def get_performance_splits_and_average_single_neuron_image(brain_model_activations, num_images, num_splits, estimator, brain_model_name, num_primate_it_neurons_for_scaling_factor_match):
    '''
    Runs the decoding sweep for a single (neuron count, image count) combination
    over `num_splits` train/test splits and saves one all-splits CSV per domain.

    Arguments:
        brain_model_activations: (NeuronRecordingAssembly) activations with dims {presentation, neuroid}
        num_images: (int) fixed number of training images per split
        num_splits: (int) number of train/test splits to run
        estimator: (sklearn classifier) estimator template handed to get_decoder
        brain_model_name: (str) used in the output CSV filenames
        num_primate_it_neurons_for_scaling_factor_match: passed through to the scaling-factor lookup
    '''
    # Check dimensionality of NeuroAssembly
    assert set(brain_model_activations.dims) == {'presentation', 'neuroid'}

    # Load data: get a dictionary with all crossdomain data
    crossdomain_data_dict = get_crossdomain_data_dictionary(brain_model_activations)
    # Load dataframes for each crossdomain
    crossdomain_dataframes = get_crossdomain_dataframes(single_neuron_image=True)
    # Get the correct neuron count for this brain model
    num_neurons = get_scaling_factor_num_neurons(brain_model=brain_model_name, primate_it_num_neurons=num_primate_it_neurons_for_scaling_factor_match)

    # Loop through the kfold splits
    for split in np.arange(num_splits):
        # Get new test images for each split
        crossdomain_test_images_dict, complete_training_data = split_training_test_images(crossdomain_data_dictionary=crossdomain_data_dict)

        # Sample final training data with the right number of neurons & images
        final_training_data, neuron_indices = get_final_traning_data(complete_training_data=complete_training_data, num_images_training=num_images,
                                                                     num_neurons=num_neurons)
        # BUGFIX: get_decoder returns a single fitted estimator, not a tuple,
        # so `split_decoder, _ = ...` raised a TypeError here.
        split_decoder = get_decoder(data=final_training_data, estimator=estimator)
        # Get the final testing data with the correct number of neurons
        final_test_data_dict = get_final_testing_data(crossdomain_test_images_dictionary=crossdomain_test_images_dict, neuron_indices=neuron_indices)

        # Get the test accuracy and store it in the split dataframe
        crossdomain_dataframes = add_accuracies_to_split_df_single_neuron_image(final_test_data_dictionary=final_test_data_dict, decoder=split_decoder,
                                                                                split_dataframe=crossdomain_dataframes, num_neurons=num_neurons, num_training_images=num_images,
                                                                                split_num=split)

    save_dataframes_single_neuron_image(crossdomain_dataframes=crossdomain_dataframes, brain_model_name=brain_model_name)
def save_dataframes_single_neuron_image(crossdomain_dataframes, brain_model_name):
    '''Persist, per domain, the all-splits performance dataframe to CSV.'''
    for crossdomain, frame in crossdomain_dataframes.items():
        save_dataframe(dataframe=frame, csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_scaling_factor_penultimate_layer_all_splits.csv')
def save_split_averaged_dataframes_single_image(crossdomain_dataframes, neurons_array, image_num, brain_model_name, primate_it_num_neurons):
    '''
    Saves, per domain, a dataframe with performance averaged over all splits for a
    single training-image count.

    Arguments:
        crossdomain_dataframes (dict): keys: domain name, values: per-split performance rows for each #Neurons value
        neurons_array: array with the neuron counts used for training
        image_num: the single number of training images used
        brain_model_name: name of brain model (used in the output filename)
        primate_it_num_neurons: neuron-match count (used in the output filename)
    '''
    # Average over all splits. Get mean and standard deviation
    crossdomain_dataframes_averaged = get_crossdomain_dataframes()
    for crossdomain in crossdomain_dataframes.keys():
        crossdomain_dataframes_averaged[crossdomain]['#Neurons'] = neurons_array
        crossdomain_dataframes_averaged[crossdomain]['#Images training'] = np.repeat(image_num, repeats=len(neurons_array))
        # NOTE(review): relies on groupby's sorted group order matching the order of
        # `neurons_array`, and on the grouped frame reducing to a single value column -- TODO confirm
        crossdomain_dataframes_averaged[crossdomain]['Accuracy test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).mean().values
        crossdomain_dataframes_averaged[crossdomain]['Std test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).std().values
        save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_penultimate_layer_multiple_neurons_{primate_it_num_neurons}_neuron_match.csv')
+ ####################################
829
+ # Data handling functions
830
+ #################################
831
+
832
def add_accuracies_to_split_df_single_neuron_image(final_test_data_dictionary, decoder, split_dataframe, num_neurons, num_training_images, split_num):
    '''
    Fill the per-domain dataframes with decoder performance for one split,
    tagging each row with the split number.

    Arguments:
        final_test_data_dictionary (dict with NeuronRecordingAssembly): key: domain names, values: final test data with the correct neurons,
        decoder: trained decoder,
        split_dataframe (dict): keys: domain names, values: dataframe with columns: #Neurons, #Images training, Accuracy test data, Split number,
        num_neurons: number of training neurons,
        num_training_images: number of training images,
        split_num: index of the current split

    Returns:
        split_dataframe (dict): the same dict with one row appended per domain
    '''
    # Get and store the test accuracy for each crossdomain
    for crossdomain in final_test_data_dictionary.keys():
        test_accuracy = get_classifier_score_2AFC(classifier=decoder, data=final_test_data_dictionary[crossdomain])
        # DataFrame.append was removed in pandas 2.0 -- build a one-row frame and concat instead
        new_row = pd.DataFrame([{
            '#Neurons': num_neurons,
            '#Images training': num_training_images,
            'Accuracy test data': test_accuracy,
            'Split number': split_num
        }])
        split_dataframe[crossdomain] = pd.concat([split_dataframe[crossdomain], new_row], ignore_index=True)

    return split_dataframe