brainscore-vision 2.1__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (1009) hide show
  1. brainscore_vision/__init__.py +105 -0
  2. brainscore_vision/__main__.py +20 -0
  3. brainscore_vision/benchmark_helpers/__init__.py +67 -0
  4. brainscore_vision/benchmark_helpers/neural_common.py +70 -0
  5. brainscore_vision/benchmark_helpers/properties_common.py +424 -0
  6. brainscore_vision/benchmark_helpers/screen.py +126 -0
  7. brainscore_vision/benchmark_helpers/test_helper.py +160 -0
  8. brainscore_vision/benchmarks/README.md +7 -0
  9. brainscore_vision/benchmarks/__init__.py +122 -0
  10. brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
  11. brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
  12. brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
  13. brainscore_vision/benchmarks/baker2022/test.py +90 -0
  14. brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
  15. brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
  16. brainscore_vision/benchmarks/bmd2024/test.py +29 -0
  17. brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
  18. brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
  19. brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
  20. brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
  21. brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
  22. brainscore_vision/benchmarks/cadena2017/test.py +35 -0
  23. brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
  24. brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
  25. brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
  26. brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
  27. brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
  28. brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
  29. brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
  30. brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
  31. brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
  32. brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
  33. brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
  34. brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
  35. brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
  36. brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
  37. brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
  38. brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
  39. brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
  40. brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
  41. brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
  42. brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
  43. brainscore_vision/benchmarks/hebart2023/test.py +19 -0
  44. brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
  45. brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
  46. brainscore_vision/benchmarks/hermann2020/test.py +28 -0
  47. brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
  48. brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
  49. brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
  50. brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
  51. brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
  52. brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
  53. brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
  54. brainscore_vision/benchmarks/imagenet/test.py +32 -0
  55. brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
  56. brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
  57. brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
  58. brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
  59. brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
  60. brainscore_vision/benchmarks/islam2021/test.py +47 -0
  61. brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
  62. brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
  63. brainscore_vision/benchmarks/kar2019/test.py +93 -0
  64. brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
  65. brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
  66. brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
  67. brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
  68. brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
  69. brainscore_vision/benchmarks/malania2007/test.py +64 -0
  70. brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
  71. brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
  72. brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
  73. brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
  74. brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
  75. brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
  76. brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
  77. brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
  78. brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
  79. brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
  80. brainscore_vision/benchmarks/marques2020/test.py +135 -0
  81. brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
  82. brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
  83. brainscore_vision/benchmarks/objectnet/test.py +33 -0
  84. brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
  85. brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
  86. brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
  87. brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
  88. brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
  89. brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
  90. brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
  91. brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
  92. brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
  93. brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
  94. brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
  95. brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
  96. brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
  97. brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
  98. brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
  99. brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
  100. brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
  101. brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
  102. brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
  103. brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
  104. brainscore_vision/benchmarks/scialom2024/test.py +162 -0
  105. brainscore_vision/data/__init__.py +0 -0
  106. brainscore_vision/data/baker2022/__init__.py +40 -0
  107. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
  108. brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
  109. brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
  110. brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
  111. brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
  112. brainscore_vision/data/baker2022/test.py +135 -0
  113. brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
  114. brainscore_vision/data/barbumayo2019/__init__.py +23 -0
  115. brainscore_vision/data/barbumayo2019/test.py +10 -0
  116. brainscore_vision/data/bashivankar2019/__init__.py +52 -0
  117. brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
  118. brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
  119. brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
  120. brainscore_vision/data/bashivankar2019/test.py +15 -0
  121. brainscore_vision/data/bmd2024/__init__.py +69 -0
  122. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
  123. brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
  124. brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
  125. brainscore_vision/data/bmd2024/test.py +130 -0
  126. brainscore_vision/data/bracci2019/__init__.py +36 -0
  127. brainscore_vision/data/bracci2019/data_packaging.py +221 -0
  128. brainscore_vision/data/bracci2019/test.py +16 -0
  129. brainscore_vision/data/cadena2017/__init__.py +52 -0
  130. brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
  131. brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
  132. brainscore_vision/data/cadena2017/test.py +24 -0
  133. brainscore_vision/data/cichy2019/__init__.py +38 -0
  134. brainscore_vision/data/cichy2019/test.py +8 -0
  135. brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
  136. brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
  137. brainscore_vision/data/coggan2024_behavior/test.py +32 -0
  138. brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
  139. brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
  140. brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
  141. brainscore_vision/data/david2004/__init__.py +34 -0
  142. brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
  143. brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
  144. brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
  145. brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
  146. brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
  147. brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
  148. brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
  149. brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
  150. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
  151. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
  152. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
  153. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
  154. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
  155. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
  156. brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
  157. brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
  158. brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
  159. brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
  160. brainscore_vision/data/david2004/test.py +8 -0
  161. brainscore_vision/data/deng2009/__init__.py +22 -0
  162. brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
  163. brainscore_vision/data/deng2009/test.py +9 -0
  164. brainscore_vision/data/ferguson2024/__init__.py +401 -0
  165. brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
  166. brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
  167. brainscore_vision/data/ferguson2024/requirements.txt +2 -0
  168. brainscore_vision/data/ferguson2024/test.py +155 -0
  169. brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
  170. brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
  171. brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
  172. brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
  173. brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
  174. brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
  175. brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
  176. brainscore_vision/data/freemanziemba2013/test.py +97 -0
  177. brainscore_vision/data/geirhos2021/__init__.py +358 -0
  178. brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
  179. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
  180. brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
  181. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
  182. brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
  183. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
  184. brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
  185. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
  186. brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
  187. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
  188. brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
  189. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
  190. brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
  191. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
  192. brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
  193. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
  194. brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
  195. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
  196. brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
  197. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
  198. brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
  199. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
  200. brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
  201. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
  202. brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
  203. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
  204. brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
  205. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
  206. brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
  207. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
  208. brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
  209. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
  210. brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
  211. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
  212. brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
  213. brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
  214. brainscore_vision/data/geirhos2021/test.py +330 -0
  215. brainscore_vision/data/hebart2023/__init__.py +23 -0
  216. brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
  217. brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
  218. brainscore_vision/data/hebart2023/test.py +42 -0
  219. brainscore_vision/data/hendrycks2019/__init__.py +45 -0
  220. brainscore_vision/data/hendrycks2019/test.py +26 -0
  221. brainscore_vision/data/igustibagus2024/__init__.py +23 -0
  222. brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
  223. brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
  224. brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
  225. brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
  226. brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
  227. brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
  228. brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
  229. brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
  230. brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
  231. brainscore_vision/data/igustibagus2024/test.py +26 -0
  232. brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
  233. brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
  234. brainscore_vision/data/imagenetslim15000/test.py +8 -0
  235. brainscore_vision/data/islam2021/__init__.py +18 -0
  236. brainscore_vision/data/islam2021/data_packaging.py +64 -0
  237. brainscore_vision/data/islam2021/test.py +11 -0
  238. brainscore_vision/data/kar2018/__init__.py +58 -0
  239. brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
  240. brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
  241. brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
  242. brainscore_vision/data/kar2018/test.py +10 -0
  243. brainscore_vision/data/kar2019/__init__.py +43 -0
  244. brainscore_vision/data/kar2019/data_packaging.py +116 -0
  245. brainscore_vision/data/kar2019/test.py +8 -0
  246. brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
  247. brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
  248. brainscore_vision/data/kuzovkin2018/test.py +8 -0
  249. brainscore_vision/data/majajhong2015/__init__.py +113 -0
  250. brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
  251. brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
  252. brainscore_vision/data/majajhong2015/test.py +38 -0
  253. brainscore_vision/data/malania2007/__init__.py +254 -0
  254. brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
  255. brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
  256. brainscore_vision/data/malania2007/test.py +147 -0
  257. brainscore_vision/data/maniquet2024/__init__.py +57 -0
  258. brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
  259. brainscore_vision/data/maniquet2024/test.py +16 -0
  260. brainscore_vision/data/marques2020/__init__.py +123 -0
  261. brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
  262. brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
  263. brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
  264. brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
  265. brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
  266. brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
  267. brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
  268. brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
  269. brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
  270. brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
  271. brainscore_vision/data/marques2020/test.py +54 -0
  272. brainscore_vision/data/rajalingham2018/__init__.py +56 -0
  273. brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
  274. brainscore_vision/data/rajalingham2018/test.py +10 -0
  275. brainscore_vision/data/rajalingham2020/__init__.py +39 -0
  276. brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
  277. brainscore_vision/data/rajalingham2020/test.py +8 -0
  278. brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
  279. brainscore_vision/data/rust2012/__init__.py +45 -0
  280. brainscore_vision/data/rust2012/rust305.py +35 -0
  281. brainscore_vision/data/rust2012/test.py +47 -0
  282. brainscore_vision/data/sanghavi2020/__init__.py +119 -0
  283. brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
  284. brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
  285. brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
  286. brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
  287. brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
  288. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
  289. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
  290. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
  291. brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
  292. brainscore_vision/data/sanghavi2020/test.py +13 -0
  293. brainscore_vision/data/scialom2024/__init__.py +386 -0
  294. brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
  295. brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
  296. brainscore_vision/data/scialom2024/test.py +301 -0
  297. brainscore_vision/data/seibert2019/__init__.py +25 -0
  298. brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
  299. brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
  300. brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
  301. brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
  302. brainscore_vision/data/seibert2019/test.py +35 -0
  303. brainscore_vision/data/zhang2018/__init__.py +38 -0
  304. brainscore_vision/data/zhang2018/test.py +29 -0
  305. brainscore_vision/data_helpers/__init__.py +0 -0
  306. brainscore_vision/data_helpers/lookup_legacy.py +15 -0
  307. brainscore_vision/data_helpers/s3.py +79 -0
  308. brainscore_vision/metric_helpers/__init__.py +5 -0
  309. brainscore_vision/metric_helpers/temporal.py +119 -0
  310. brainscore_vision/metric_helpers/transformations.py +379 -0
  311. brainscore_vision/metric_helpers/utils.py +71 -0
  312. brainscore_vision/metric_helpers/xarray_utils.py +151 -0
  313. brainscore_vision/metrics/__init__.py +7 -0
  314. brainscore_vision/metrics/accuracy/__init__.py +4 -0
  315. brainscore_vision/metrics/accuracy/metric.py +16 -0
  316. brainscore_vision/metrics/accuracy/test.py +11 -0
  317. brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
  318. brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
  319. brainscore_vision/metrics/accuracy_distance/test.py +57 -0
  320. brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
  321. brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
  322. brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
  323. brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
  324. brainscore_vision/metrics/cka/__init__.py +14 -0
  325. brainscore_vision/metrics/cka/metric.py +105 -0
  326. brainscore_vision/metrics/cka/test.py +28 -0
  327. brainscore_vision/metrics/dimensionality/__init__.py +13 -0
  328. brainscore_vision/metrics/dimensionality/metric.py +45 -0
  329. brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
  330. brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
  331. brainscore_vision/metrics/distribution_similarity/test.py +10 -0
  332. brainscore_vision/metrics/error_consistency/__init__.py +13 -0
  333. brainscore_vision/metrics/error_consistency/metric.py +93 -0
  334. brainscore_vision/metrics/error_consistency/test.py +39 -0
  335. brainscore_vision/metrics/i1i2/__init__.py +16 -0
  336. brainscore_vision/metrics/i1i2/metric.py +299 -0
  337. brainscore_vision/metrics/i1i2/requirements.txt +2 -0
  338. brainscore_vision/metrics/i1i2/test.py +36 -0
  339. brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
  340. brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
  341. brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
  342. brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
  343. brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
  344. brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
  345. brainscore_vision/metrics/internal_consistency/test.py +39 -0
  346. brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
  347. brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
  348. brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
  349. brainscore_vision/metrics/mask_regression/__init__.py +16 -0
  350. brainscore_vision/metrics/mask_regression/metric.py +242 -0
  351. brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
  352. brainscore_vision/metrics/mask_regression/test.py +0 -0
  353. brainscore_vision/metrics/ost/__init__.py +23 -0
  354. brainscore_vision/metrics/ost/metric.py +350 -0
  355. brainscore_vision/metrics/ost/requirements.txt +2 -0
  356. brainscore_vision/metrics/ost/test.py +0 -0
  357. brainscore_vision/metrics/rdm/__init__.py +14 -0
  358. brainscore_vision/metrics/rdm/metric.py +101 -0
  359. brainscore_vision/metrics/rdm/requirements.txt +2 -0
  360. brainscore_vision/metrics/rdm/test.py +63 -0
  361. brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
  362. brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
  363. brainscore_vision/metrics/regression_correlation/metric.py +125 -0
  364. brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
  365. brainscore_vision/metrics/regression_correlation/test.py +36 -0
  366. brainscore_vision/metrics/threshold/__init__.py +5 -0
  367. brainscore_vision/metrics/threshold/metric.py +481 -0
  368. brainscore_vision/metrics/threshold/test.py +71 -0
  369. brainscore_vision/metrics/value_delta/__init__.py +4 -0
  370. brainscore_vision/metrics/value_delta/metric.py +30 -0
  371. brainscore_vision/metrics/value_delta/requirements.txt +1 -0
  372. brainscore_vision/metrics/value_delta/test.py +40 -0
  373. brainscore_vision/model_helpers/__init__.py +3 -0
  374. brainscore_vision/model_helpers/activations/__init__.py +1 -0
  375. brainscore_vision/model_helpers/activations/core.py +635 -0
  376. brainscore_vision/model_helpers/activations/pca.py +117 -0
  377. brainscore_vision/model_helpers/activations/pytorch.py +152 -0
  378. brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
  379. brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
  380. brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
  381. brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
  382. brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
  383. brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
  384. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
  385. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
  386. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
  387. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
  388. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
  389. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
  390. brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
  391. brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
  392. brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
  393. brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
  394. brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
  395. brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
  396. brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
  397. brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
  398. brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
  399. brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
  400. brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
  401. brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
  402. brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
  403. brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
  404. brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
  405. brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
  406. brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
  407. brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
  408. brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
  409. brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
  410. brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
  411. brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
  412. brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
  413. brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
  414. brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
  415. brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
  416. brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
  417. brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
  418. brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
  419. brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
  420. brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
  421. brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
  422. brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
  423. brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
  424. brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
  425. brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
  426. brainscore_vision/model_helpers/conftest.py +3 -0
  427. brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
  428. brainscore_vision/model_helpers/s3.py +62 -0
  429. brainscore_vision/model_helpers/utils/__init__.py +15 -0
  430. brainscore_vision/model_helpers/utils/s3.py +42 -0
  431. brainscore_vision/model_interface.py +214 -0
  432. brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
  433. brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
  434. brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
  435. brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
  436. brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
  437. brainscore_vision/models/AlexNet_SIN/model.py +29 -0
  438. brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
  439. brainscore_vision/models/AlexNet_SIN/test.py +1 -0
  440. brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
  441. brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
  442. brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
  443. brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
  444. brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
  445. brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
  446. brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
  447. brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
  448. brainscore_vision/models/__init__.py +0 -0
  449. brainscore_vision/models/alexnet/__init__.py +8 -0
  450. brainscore_vision/models/alexnet/model.py +28 -0
  451. brainscore_vision/models/alexnet/requirements.txt +2 -0
  452. brainscore_vision/models/alexnet/test.py +15 -0
  453. brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
  454. brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
  455. brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
  456. brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
  457. brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
  458. brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
  459. brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
  460. brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
  461. brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
  462. brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
  463. brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
  464. brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
  465. brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
  466. brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
  467. brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
  468. brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
  469. brainscore_vision/models/alexnet_testing/__init__.py +8 -0
  470. brainscore_vision/models/alexnet_testing/model.py +28 -0
  471. brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
  472. brainscore_vision/models/alexnet_testing/setup.py +24 -0
  473. brainscore_vision/models/alexnet_testing/test.py +15 -0
  474. brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
  475. brainscore_vision/models/antialias_resnet152/model.py +35 -0
  476. brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
  477. brainscore_vision/models/antialias_resnet152/test.py +8 -0
  478. brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
  479. brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
  480. brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
  481. brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
  482. brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
  483. brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
  484. brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
  485. brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
  486. brainscore_vision/models/clip/__init__.py +5 -0
  487. brainscore_vision/models/clip/model.py +179 -0
  488. brainscore_vision/models/clip/requirements.txt +4 -0
  489. brainscore_vision/models/clip/test.py +1 -0
  490. brainscore_vision/models/clipvision/__init__.py +5 -0
  491. brainscore_vision/models/clipvision/model.py +179 -0
  492. brainscore_vision/models/clipvision/requirements.txt +4 -0
  493. brainscore_vision/models/clipvision/test.py +1 -0
  494. brainscore_vision/models/cornet_s/__init__.py +8 -0
  495. brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
  496. brainscore_vision/models/cornet_s/model.py +77 -0
  497. brainscore_vision/models/cornet_s/requirements.txt +7 -0
  498. brainscore_vision/models/cornet_s/test.py +8 -0
  499. brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
  500. brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
  501. brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
  502. brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
  503. brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
  504. brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
  505. brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
  506. brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
  507. brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
  508. brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
  509. brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
  510. brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
  511. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
  512. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
  513. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
  514. brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
  515. brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
  516. brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
  517. brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
  518. brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
  519. brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
  520. brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
  521. brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
  522. brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
  523. brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
  524. brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
  525. brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
  526. brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
  527. brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
  528. brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
  529. brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
  530. brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
  531. brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
  532. brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
  533. brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
  534. brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
  535. brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
  536. brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
  537. brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
  538. brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
  539. brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
  540. brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
  541. brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
  542. brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
  543. brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
  544. brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
  545. brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
  546. brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
  547. brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
  548. brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
  549. brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
  550. brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
  551. brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
  552. brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
  553. brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
  554. brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
  555. brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
  556. brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
  557. brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
  558. brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
  559. brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
  560. brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
  561. brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
  562. brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
  563. brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
  564. brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
  565. brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
  566. brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
  567. brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
  568. brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
  569. brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
  570. brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
  571. brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
  572. brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
  573. brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
  574. brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
  575. brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
  576. brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
  577. brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
  578. brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
  579. brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
  580. brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
  581. brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
  582. brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
  583. brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
  584. brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
  585. brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
  586. brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
  587. brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
  588. brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
  589. brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
  590. brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
  591. brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
  592. brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
  593. brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
  594. brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
  595. brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
  596. brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
  597. brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
  598. brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
  599. brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
  600. brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
  601. brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
  602. brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
  603. brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
  604. brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
  605. brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
  606. brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
  607. brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
  608. brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
  609. brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
  610. brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
  611. brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
  612. brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
  613. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
  614. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
  615. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
  616. brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
  617. brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
  618. brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
  619. brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
  620. brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
  621. brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
  622. brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
  623. brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
  624. brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
  625. brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
  626. brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
  627. brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
  628. brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
  629. brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
  630. brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
  631. brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
  632. brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
  633. brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
  634. brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
  635. brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
  636. brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
  637. brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
  638. brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
  639. brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
  640. brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
  641. brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
  642. brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
  643. brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
  644. brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
  645. brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
  646. brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
  647. brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
  648. brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
  649. brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
  650. brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
  651. brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
  652. brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
  653. brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
  654. brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
  655. brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
  656. brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
  657. brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
  658. brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
  659. brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
  660. brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
  661. brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
  662. brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
  663. brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
  664. brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
  665. brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
  666. brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
  667. brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
  668. brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
  669. brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
  670. brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
  671. brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
  672. brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
  673. brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
  674. brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
  675. brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
  676. brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
  677. brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
  678. brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
  679. brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
  680. brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
  681. brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
  682. brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
  683. brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
  684. brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
  685. brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
  686. brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
  687. brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
  688. brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
  689. brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
  690. brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
  691. brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
  692. brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
  693. brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
  694. brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
  695. brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
  696. brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
  697. brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
  698. brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
  699. brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
  700. brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
  701. brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
  702. brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
  703. brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
  704. brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
  705. brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
  706. brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
  707. brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
  708. brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
  709. brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
  710. brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
  711. brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
  712. brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
  713. brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
  714. brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
  715. brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
  716. brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
  717. brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
  718. brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
  719. brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
  720. brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
  721. brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
  722. brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
  723. brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
  724. brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
  725. brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
  726. brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
  727. brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
  728. brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
  729. brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
  730. brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
  731. brainscore_vision/models/effnetb1_272x240/model.py +126 -0
  732. brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
  733. brainscore_vision/models/effnetb1_272x240/test.py +9 -0
  734. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
  735. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
  736. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
  737. brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
  738. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
  739. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
  740. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
  741. brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
  742. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
  743. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
  744. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
  745. brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
  746. brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
  747. brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
  748. brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
  749. brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
  750. brainscore_vision/models/hmax/__init__.py +7 -0
  751. brainscore_vision/models/hmax/helpers/hmax.py +438 -0
  752. brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
  753. brainscore_vision/models/hmax/model.py +69 -0
  754. brainscore_vision/models/hmax/requirements.txt +5 -0
  755. brainscore_vision/models/hmax/test.py +8 -0
  756. brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
  757. brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
  758. brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
  759. brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
  760. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
  761. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
  762. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
  763. brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
  764. brainscore_vision/models/mobilevit_small/__init__.py +7 -0
  765. brainscore_vision/models/mobilevit_small/model.py +49 -0
  766. brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
  767. brainscore_vision/models/mobilevit_small/test.py +8 -0
  768. brainscore_vision/models/pixels/__init__.py +8 -0
  769. brainscore_vision/models/pixels/model.py +35 -0
  770. brainscore_vision/models/pixels/test.py +15 -0
  771. brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
  772. brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
  773. brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
  774. brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
  775. brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
  776. brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
  777. brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
  778. brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
  779. brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
  780. brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
  781. brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
  782. brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
  783. brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
  784. brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
  785. brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
  786. brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
  787. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
  788. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
  789. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
  790. brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
  791. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
  792. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
  793. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
  794. brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
  795. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
  796. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
  797. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
  798. brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
  799. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
  800. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
  801. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
  802. brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
  803. brainscore_vision/models/r50_tvpt/__init__.py +9 -0
  804. brainscore_vision/models/r50_tvpt/model.py +47 -0
  805. brainscore_vision/models/r50_tvpt/setup.py +24 -0
  806. brainscore_vision/models/r50_tvpt/test.py +1 -0
  807. brainscore_vision/models/regnet/__init__.py +14 -0
  808. brainscore_vision/models/regnet/model.py +17 -0
  809. brainscore_vision/models/regnet/requirements.txt +2 -0
  810. brainscore_vision/models/regnet/test.py +17 -0
  811. brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
  812. brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
  813. brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
  814. brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
  815. brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
  816. brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
  817. brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
  818. brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
  819. brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
  820. brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
  821. brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
  822. brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
  823. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
  824. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
  825. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
  826. brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
  827. brainscore_vision/models/resnet50_julios/__init__.py +5 -0
  828. brainscore_vision/models/resnet50_julios/model.py +54 -0
  829. brainscore_vision/models/resnet50_julios/setup.py +24 -0
  830. brainscore_vision/models/resnet50_julios/test.py +1 -0
  831. brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
  832. brainscore_vision/models/resnet50_tutorial/model.py +34 -0
  833. brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
  834. brainscore_vision/models/resnet50_tutorial/test.py +8 -0
  835. brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
  836. brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
  837. brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
  838. brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
  839. brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
  840. brainscore_vision/models/resnet_50_robust/model.py +55 -0
  841. brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
  842. brainscore_vision/models/resnet_50_robust/test.py +8 -0
  843. brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
  844. brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
  845. brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
  846. brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
  847. brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
  848. brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
  849. brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
  850. brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
  851. brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
  852. brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
  853. brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
  854. brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
  855. brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
  856. brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
  857. brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
  858. brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
  859. brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
  860. brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
  861. brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
  862. brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
  863. brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
  864. brainscore_vision/models/temporal_model_GDT/model.py +72 -0
  865. brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
  866. brainscore_vision/models/temporal_model_GDT/test.py +17 -0
  867. brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
  868. brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
  869. brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
  870. brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
  871. brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
  872. brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
  873. brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
  874. brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
  875. brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
  876. brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
  877. brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
  878. brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
  879. brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
  880. brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
  881. brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
  882. brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
  883. brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
  884. brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
  885. brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
  886. brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
  887. brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
  888. brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
  889. brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
  890. brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
  891. brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
  892. brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
  893. brainscore_vision/models/temporal_model_openstl/model.py +206 -0
  894. brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
  895. brainscore_vision/models/temporal_model_openstl/test.py +19 -0
  896. brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
  897. brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
  898. brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
  899. brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
  900. brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
  901. brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
  902. brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
  903. brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
  904. brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
  905. brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
  906. brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
  907. brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
  908. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
  909. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
  910. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
  911. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
  912. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
  913. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
  914. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
  915. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
  916. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
  917. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
  918. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
  919. brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
  920. brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
  921. brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
  922. brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
  923. brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
  924. brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
  925. brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
  926. brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
  927. brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
  928. brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
  929. brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
  930. brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
  931. brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
  932. brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
  933. brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
  934. brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
  935. brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
  936. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
  937. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
  938. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
  939. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
  940. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
  941. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
  942. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
  943. brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
  944. brainscore_vision/submission/__init__.py +0 -0
  945. brainscore_vision/submission/actions_helpers.py +153 -0
  946. brainscore_vision/submission/config.py +7 -0
  947. brainscore_vision/submission/endpoints.py +58 -0
  948. brainscore_vision/utils/__init__.py +91 -0
  949. brainscore_vision-2.1.dist-info/LICENSE +11 -0
  950. brainscore_vision-2.1.dist-info/METADATA +152 -0
  951. brainscore_vision-2.1.dist-info/RECORD +1009 -0
  952. brainscore_vision-2.1.dist-info/WHEEL +5 -0
  953. brainscore_vision-2.1.dist-info/top_level.txt +4 -0
  954. docs/Makefile +20 -0
  955. docs/source/conf.py +78 -0
  956. docs/source/index.rst +21 -0
  957. docs/source/modules/api_reference.rst +10 -0
  958. docs/source/modules/benchmarks.rst +8 -0
  959. docs/source/modules/brainscore_submission.png +0 -0
  960. docs/source/modules/developer_clarifications.rst +36 -0
  961. docs/source/modules/metrics.rst +8 -0
  962. docs/source/modules/model_interface.rst +8 -0
  963. docs/source/modules/submission.rst +112 -0
  964. docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
  965. docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
  966. docs/source/modules/tutorial_screenshots/init_py.png +0 -0
  967. docs/source/modules/tutorial_screenshots/mms.png +0 -0
  968. docs/source/modules/tutorial_screenshots/setup.png +0 -0
  969. docs/source/modules/tutorial_screenshots/sms.png +0 -0
  970. docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
  971. docs/source/modules/utils.rst +22 -0
  972. migrations/2020-12-20_pkl_to_nc.py +90 -0
  973. tests/__init__.py +6 -0
  974. tests/conftest.py +26 -0
  975. tests/test_benchmark_helpers/__init__.py +0 -0
  976. tests/test_benchmark_helpers/test_screen.py +75 -0
  977. tests/test_examples.py +41 -0
  978. tests/test_integration.py +43 -0
  979. tests/test_metric_helpers/__init__.py +0 -0
  980. tests/test_metric_helpers/test_temporal.py +80 -0
  981. tests/test_metric_helpers/test_transformations.py +171 -0
  982. tests/test_metric_helpers/test_xarray_utils.py +85 -0
  983. tests/test_model_helpers/__init__.py +6 -0
  984. tests/test_model_helpers/activations/__init__.py +0 -0
  985. tests/test_model_helpers/activations/test___init__.py +404 -0
  986. tests/test_model_helpers/brain_transformation/__init__.py +0 -0
  987. tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
  988. tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
  989. tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
  990. tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
  991. tests/test_model_helpers/temporal/__init__.py +0 -0
  992. tests/test_model_helpers/temporal/activations/__init__.py +0 -0
  993. tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
  994. tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
  995. tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
  996. tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
  997. tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
  998. tests/test_model_helpers/temporal/test_utils.py +61 -0
  999. tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
  1000. tests/test_model_helpers/test_imports.py +10 -0
  1001. tests/test_model_helpers/test_s3.py +38 -0
  1002. tests/test_models.py +15 -0
  1003. tests/test_stimuli.py +0 -0
  1004. tests/test_submission/__init__.py +0 -0
  1005. tests/test_submission/mock_config.py +3 -0
  1006. tests/test_submission/test_actions_helpers.py +67 -0
  1007. tests/test_submission/test_db.py +54 -0
  1008. tests/test_submission/test_endpoints.py +125 -0
  1009. tests/test_utils.py +21 -0
@@ -0,0 +1,386 @@
1
+ from brainio.assemblies import BehavioralAssembly
2
+
3
+ from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set
4
+ from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3
5
+
6
+
7
def force_stimulus_set_column_to_str(stimulus_set, columns=('percentage_elements', 'condition')):
    """Cast the given stimulus-set columns to string dtype, in place.

    The listed columns hold mixed values (numeric levels alongside labels such
    as 'RGB'), so they are normalized to strings for consistent comparisons.

    :param stimulus_set: pandas-backed stimulus set whose columns are converted in place
    :param columns: names of the columns to cast to string
    :return: the same stimulus set object, for call-chaining convenience
    """
    for column_name in columns:
        converted = stimulus_set[column_name].astype(str)
        stimulus_set[column_name] = converted
    return stimulus_set
12
+
13
+
14
# (version_id, sha1) of each packaged BehavioralAssembly on S3, keyed by assembly identifier.
# Keeping the per-assembly metadata in one table replaces 22 near-identical registration
# stanzas and makes it impossible for an entry's identifier and loader to disagree.
_ASSEMBLY_METADATA = {
    'Scialom2024_rgb': ('g.9QO4x6dLeBLSOAjUfUmgOPliyWLVwT', 'b79217a6b700760b96ffe60a948d6c2af9e7a615'),
    'Scialom2024_contours': ('Ri4iQZmgxzUKvHDETkafbrnarzMrAVO8', '9487edf9f10d019968a77792908fb853fd73f818'),
    'Scialom2024_phosphenes-12': ('dYCCMwM0Pf5yiK9dmYLRYyijtV9ZoGdx', '1badbc7d08e2d135b2383a25e39ca7d22a3cd7ff'),
    'Scialom2024_phosphenes-16': ('p5NzMB.QzH612GaztXi8EMvYL3Js1R70', '1059a8126039ca01e8f293bfbe90539ea829b86f'),
    'Scialom2024_phosphenes-21': ('TbXg6.Xcf8.ssN7tkWfWpF0x.5lnolVN', 'ac28102c6b165a759b102f670a99b33a67c7fc9a'),
    'Scialom2024_phosphenes-27': ('5Sp9DU3CgIaZrnzzq6EntKPnKlgsHrpm', '3bb9adfcdb294a9ecfec14af381f3dbdc7f6dfeb'),
    'Scialom2024_phosphenes-35': ('3rZBFqfPkSUJV6GTqlbn6x7JV8E65LoB', 'fa27c3931f76a696b1f1dde4c67a6733d682bcb2'),
    'Scialom2024_phosphenes-46': ('GsjfhIatQ2wlUMol8jtK9RiC5cCVnTg7', '11f42e777a0938879473baa7a4efaefd27681c54'),
    'Scialom2024_phosphenes-59': ('9Mp6HAugSkKeYUVczRDWYMoYI2Qb8K0r', 'f9e6ecb9013871f357fa56a9bfaa43c80eb0d9f5'),
    'Scialom2024_phosphenes-77': ('h4HdLUn8NEM6mzYf45rYuUnukampVN0l', 'f3e3c50983b859eff094b75ca53939bf156b0f3f'),
    'Scialom2024_phosphenes-100': ('NFmHKPwWEDkQiCexKX2ROBchfDgwOHLw', 'b97bda0f9ea10a684b81cf2118578edb483d9e27'),
    'Scialom2024_segments-12': ('2ezI3rg_t1nycV_8FF9_N.mC8hjT.Abz', 'fec2104fc53f04af727174ca54fec2c9ad3b553d'),
    'Scialom2024_segments-16': ('7q9AyMlmD4oHWYCDpYK5UlwidCrdP7kc', '4e30fc27bf98fa374af29a3bbb0de27c95ff845c'),
    'Scialom2024_segments-21': ('k2EC3vY8ZT_qv_0ErMErRISxu7uKtzkH', '280dde1bb8ad226307ce25505a54f852197d0686'),
    'Scialom2024_segments-27': ('LLZMZjrx6sqmbxc16i6rXsB3IR4q48rx', '249148aaf4bed26ac2dcf07e4ab1d4dd19fc6531'),
    'Scialom2024_segments-35': ('OZLLP4b953wlOiFD3PqCQWXc4Hp2391W', '67b729721928c5f1e85fc5f48e0c574ecfa196e4'),
    'Scialom2024_segments-46': ('XsmgXxqEAXaz7luA4pWgCp3CWZ_FWMe3', '2e8e7f937407eb74a77e006d09df4364945f24bf'),
    'Scialom2024_segments-59': ('oUJYFbkNdRRnL_vwDa18tsVtyg3kS47E', 'ba161343f424673e2dbc123bc058b49ffa16af07'),
    'Scialom2024_segments-77': ('fm5hSgpwgftQoAiyc5sj0mkiZ8qooJGM', '1f29d37fa6b84defd268cb35d8e26c6798c63714'),
    'Scialom2024_segments-100': ('kDsLxv8JxenqL79Uwzvtz5STE5TvDrYW', '298823d65ceacccd3247fe05e21b2df85c46343d'),
    'Scialom2024_phosphenes-all': ('U_i4FlNK4GNohBOFPpUwb7EZZ35Z_EWW', 'ae0fd1095846f0c637e54ad8ff96e44bdac8117d'),
    'Scialom2024_segments-all': ('La4EBnFDur5GkyI.zKu4QjusTUpXM4sy', '0ecd8e45b4eb5a2afba91b5fe06cacc8696e5925'),
}


def _register_assembly(identifier, version_id, sha1):
    """Register a lazy S3 loader for one behavioral assembly.

    A dedicated function (rather than a bare lambda in the loop body) binds
    `identifier`/`version_id`/`sha1` eagerly, avoiding Python's late-binding
    closure pitfall that would make every registry entry load the last assembly.
    """
    data_registry[identifier] = lambda: load_assembly_from_s3(
        identifier=identifier,
        version_id=version_id,
        sha1=sha1,
        bucket="brainio-brainscore",
        cls=BehavioralAssembly,
        stimulus_set_loader=lambda: load_stimulus_set(identifier))


for _identifier, (_version_id, _sha1) in _ASSEMBLY_METADATA.items():
    _register_assembly(_identifier, _version_id, _sha1)
188
+ stimulus_set_loader=lambda: load_stimulus_set('Scialom2024_segments-all'))
189
+
190
# (csv_sha1, zip_sha1, csv_version_id, zip_version_id) of each packaged stimulus set on S3,
# keyed by stimulus-set identifier. One table instead of 22 copy-pasted registration stanzas.
_STIMULUS_SET_METADATA = {
    'Scialom2024_rgb': ('c7e66eca214ffd9e38a5ebc1de159ef5f7755df9', '6dce0513eb20c6d501a4e2e574028d12f910a1da',
                        '5bqWqpuVrl4St01_lRg3utq5cdOOGGJs', 'kdGhqj0cDkpWdgMVLadpZGrf6PK8Dirz'),
    'Scialom2024_contours': ('378ea511a906a1c440d556bd9b6232d433ba186e', '70a2277a327b7aa150654f46f46ea4bd247c5273',
                             'SqXFodaAoW7GbxjeDinX4PrOJP6lybUh', 'JgLMb99lgnHd8qW0dAdp4dzqeZW4sOFE'),
    'Scialom2024_phosphenes-12': ('762a114b716b8414322d0de620bd800f35beb02b', '701ecb4e2c87fac322a9d06b238e361d4c45b5bd',
                                  'QpBcMWbziLzLiZCbF7WlIx7BxO0Gnpef', 'TzjrwhkU37r7Ek9SvzzK5xVlYZJxv_bE'),
    'Scialom2024_phosphenes-16': ('40979a641232dab1515b7061171921221d86a13a', '26563eff7a42561204fb6c158ae00ef4acf62e4d',
                                  'EPTbc8QroMufOV85_XwB7WqPQfvqaGTl', 'Yvr7I7PDyjqUoCsC0IVau374r0aYSupx'),
    'Scialom2024_phosphenes-21': ('72cffd4c0c8d30b815825475d864a87b7eb6e386', '22f9ecd333fcc177c16ed6c005989fe04f63700c',
                                  'klnmUb0aDEnfzk5UPE.6V7XEMN6sQKRm', 'YMQqJmwbEzRTrdm2ZoMy9rmRTsPnqm1j'),
    'Scialom2024_phosphenes-27': ('94e91373299e82bcaa413e626bcaab6c914a9649', '69f17f8b1121a19738625da78b4b73de2b151926',
                                  '6daseIjtYi.w.bqa4xxVwY74494T.IUr', 'AtqEH__oIbcA76nSn6BqRxxVsffdqIoo'),
    'Scialom2024_phosphenes-35': ('dc5657b53cf5d77dc6b62db856d7c2fd6151284c', 'e26e6e5fabca6bd52d85b38416df92de0b16b81e',
                                  'yh3oZ1E7TP1WrwEv2kzBqweu9R9soly5', 'wjSfkL4RamMzbt7nlOgTiXp5r468yosf'),
    'Scialom2024_phosphenes-46': ('5838fabb81840bba4fc667e48c1e82569fcbff29', '6bb41a20bea70b3e521320258682dba1bef7a0e6',
                                  'aRLe.zBrwTyU6rv4TvobKW0bkW4wWVZp', '5glt.ZHhImjlCM7AVNXvDsbucvpIVTU7'),
    'Scialom2024_phosphenes-59': ('126123ae0596ebe93bb8b0fb985b3fd6e2db1aea', '3e7d106796050a0fd4868567c8ef2325f294a13c',
                                  'j4w9iSTF2R4z6lxGUVVOrW2QWy5l0E7J', 'aI3UVaw9ewMenhJnAxsf38VJ6HYLBNHb'),
    'Scialom2024_phosphenes-77': ('04eccad03672f7122b2821c667e8ba352eca8262', '5b5ae2437193d75d26f5ffa87245ee479ce2674a',
                                  'pEBfaM_9kwUxagDYVazs3Bk1RpHUUOxA', 'AQczJ9zAtfNNS7qenBw6dyXnLEoNnYBo'),
    'Scialom2024_phosphenes-100': ('f75998b7e2624edbdf511a4f88d4665a0258e377', 'b416d47663cd6e5d747f75f2f15eb52637313509',
                                   'EZQCfqMrjcQo0gT1w4nDp.yDyQkRpabH', 'RdS_d8Ld2tN8PCmllvU9vPGhDMIfAo8E'),
    'Scialom2024_segments-12': ('c9de15f5aec2d2fbbd3669e8f3620ae4e884556b', '0e04090ea7cc6cabc9efa7695dab5d217ebad70b',
                                'btMm1X3.9pzkXrd4GPHoBav8ONyyOeBg', 'pc4Q2pK.tv6XKF5RlY8vZnEk8_Bx349v'),
    'Scialom2024_segments-16': ('dc66edcd3f6e051418878e07002c06d2a936d142', '2aa3cb8c89bf5956ea5c7a6c0f86ce8759056b41',
                                'jXnfBBor.RNtD_Bg53eSdCLCTgJKv1LO', 'TAGJKzpndcfo8ckUyMv6OWnLUlV3OiN8'),
    'Scialom2024_segments-21': ('c461e47b42f17c05399387c16b3d76db6389d05f', 'e8116c9b1b2822bf2235b7ef91193baf2a7f47fb',
                                'cMvCmdKyyXsMDsgDDIJl.A1VEpDV6k5a', 'y6bAR8Y5Kbi0B7rlxEvk0j5qNO2VrZph'),
    'Scialom2024_segments-27': ('31e35368dbe090fe2748765b82bc0dd956818887', '607d8589a270220046455e150d3589095d8270b1',
                                'MdeJ2KQtCsN5oLa3.lQDJFwGm02R4BiX', 'yF.EUfpHFbMJHAtTCrnEhLQ9D1zD5M.T'),
    'Scialom2024_segments-35': ('3758af9739943817254ed79e12b441bb055858b9', '1df725d457f8092b2cc75a7f1509359383ae5420',
                                'ABPjQCWfRRaa0xy4ESKahIG1_I5eIhM4', 'dpbgFyI3s_BynU62DML3V866JXH5Q_aw'),
    'Scialom2024_segments-46': ('5c2982d3b079f5dd15dc18b5f127624256ca75b8', '1f0bd6bcb476bff6937dc2bb0bf9222b381e41d5',
                                'jhH9v7SieY6y6ff83XUPmLh95QPjrtIK', '88I4SDLBlhlNKBTZ0yRgZurknooVObUA'),
    'Scialom2024_segments-59': ('22df0dcfec63af39a3df97555c085ccd57a5ec09', '63cd9ee774ed4aeca54b8aa54c037037a67537d6',
                                'eOxRFJhOKIm.mk78V0vjuhmd5FUBTzgh', '_6ThdN0H9c0bOvqH0MfWKPuGSwWYkJef'),
    'Scialom2024_segments-77': ('350f3c1ad3ae52150783e8be0135d79ef1f88c9f', 'be2c2039e5eea7c3dcd90b885243fb6a1ebd40b9',
                                '_X.oelHKQFdyqEmurki57Zf49L6CRJII', 'Qn10b4obb0UCLezVGCwjzaSkbUyIeInp'),
    'Scialom2024_segments-100': ('d2d7e231c7420730d5b3118c82b859a684d89a5b', '9d7ccb70f2231c8e62aecf18f1f48c160046c98b',
                                 'bacyQYdD0g4Puj4MB0d5qkdmanvy8RG7', 'PXGICnHlx0mNo5MU_xW4v708e411SVdQ'),
    'Scialom2024_phosphenes-all': ('16276359d917b16fc84c736881cf70b5de3d9c5c', 'c2c26b17a4b4d152d1f20e78fe085e321080bcf6',
                                   'ar1W5DUVRQNZeUFCFp0OFKQYPlfr4004', 'NM9aLLdkv0cZuUFFiKK7NJgh8_DAcrOB'),
    'Scialom2024_segments-all': ('4e66191b81517f3601e7bb27c9c3a84b9ee51a89', '59b279b2c65be4cdab50bf17b7295da2426744ee',
                                 'Nl3ywB0CEEjEZmB3NH2yQkYGqxAiAD4y', 'N5bg4NWu7qbJASRULKgJsarEYRyLHlnt'),
}


def _register_stimulus_set(identifier, csv_sha1, zip_sha1, csv_version_id, zip_version_id):
    """Register a lazy S3 loader for one stimulus set.

    The loaded set is passed through `force_stimulus_set_column_to_str` so mixed-type
    columns compare consistently. Using a function instead of a lambda in the loop
    body binds the metadata eagerly, avoiding the late-binding closure pitfall.
    """
    stimulus_set_registry[identifier] = lambda: force_stimulus_set_column_to_str(load_stimulus_set_from_s3(
        identifier=identifier,
        bucket="brainio-brainscore",
        csv_sha1=csv_sha1,
        zip_sha1=zip_sha1,
        csv_version_id=csv_version_id,
        zip_version_id=zip_version_id,
        filename_prefix='stimulus_'))


for _identifier, _metadata in _STIMULUS_SET_METADATA.items():
    _register_stimulus_set(_identifier, *_metadata)
@@ -0,0 +1,164 @@
1
+ from pathlib import Path
2
+
3
+ import numpy as np
4
+ import pandas as pd
5
+
6
+ from brainio.assemblies import BehavioralAssembly
7
+ from brainio.packaging import package_data_assembly
8
+
9
+
10
'''
Experiment Information:

- 25 or 50 subjects depending on condition
- rgb & contours have 50, others have 25
- 48 images each, except in the composites, where all conditions are shown (all phosphene/segment + contours = rgb)
- 48 * 11 = 528 images each in composites
- 1200 or 2400 total images shown
- rgb & contours have 2400, composites have 13200, others have 1200
- 12-AFC categorization task
- 12 image categories; one of:
'Banana',
'Beanie',
'Binoculars',
'Boot',
'Bowl',
'Cup',
'Glasses',
'Lamp',
'Pan',
'Sewing machine',
'Shovel',
'Truck'
'''

# every experimental condition (= one group of subjects) packaged for Scialom2024
SUBJECT_GROUPS = ['rgb', 'contours', 'phosphenes-12', 'phosphenes-16', 'phosphenes-21', 'phosphenes-27',
                  'phosphenes-35', 'phosphenes-46', 'phosphenes-59', 'phosphenes-77', 'phosphenes-100', 'segments-12',
                  'segments-16', 'segments-21', 'segments-27', 'segments-35', 'segments-46', 'segments-59',
                  'segments-77', 'segments-100', 'phosphenes-all', 'segments-all']


def _percentage_elements_label(subject_group):
    """Return the 'percentage of elements' label for a subject group.

    Numeric distortion levels (e.g. 'phosphenes-12') map to the int level;
    the baselines map to 'RGB'/'contours' and the composites to 'all'.
    """
    if subject_group == 'rgb':
        return 'RGB'
    if subject_group == 'contours':
        return 'contours'
    level = subject_group.split('-')[1]
    return level if level == 'all' else int(level)


# derived from SUBJECT_GROUPS so the two structures cannot drift out of sync
PERCENTAGE_ELEMENTS = {group: _percentage_elements_label(group) for group in SUBJECT_GROUPS}
45
+
46
+
47
def collect_scialom_behavioral_assembly(data_path, subject_group, percentage_elements, which_composite):
    """Load the raw experiment CSV and package one benchmark's trials as a BehavioralAssembly.

    :param data_path: path to the raw results CSV
    :param subject_group: one of SUBJECT_GROUPS, e.g. 'rgb' or 'phosphenes-12'
    :param percentage_elements: percentage-of-elements level for this group
        (an int, or 'RGB'/'contours'/'all')
    :param which_composite: 'phosphenes' or 'segments' when building an '-all'
        composite assembly (which additionally includes the RGB and contours
        baseline trials), else None
    :return: a named BehavioralAssembly with a single 'presentation' dimension
        over the filtered trials
    """
    data = pd.read_csv(data_path)
    # the percentage_elements column mixes ints with 'RGB'/'contours'; compare everything as strings
    data['percentage_elements'] = data['percentage_elements'].astype(str)
    percentage_elements = str(percentage_elements)
    subject_group = subject_group.split('-')[0]  # e.g. 'phosphenes-12' -> 'phosphenes'

    # select the trials belonging to this benchmark, and determine which column
    # provides the 'condition' coordinate (the only way the two original
    # assembly constructions differed)
    if which_composite is not None:
        filtered_data = data[(data['subject_group'] == which_composite) |
                             (data['subject_group'] == 'RGB') |
                             (data['subject_group'] == 'contours')]
        condition_values = filtered_data['subject_group']
    elif subject_group in ('phosphenes', 'segments'):
        filtered_data = data[(data['subject_group'] == subject_group) &
                             (data['percentage_elements'] == percentage_elements)]
        condition_values = filtered_data['percentage_elements']
    else:  # rgb / contours baselines are identified by their percentage_elements label alone
        filtered_data = data[data['percentage_elements'] == percentage_elements]
        condition_values = filtered_data['percentage_elements']

    # construct the assembly (deduplicated: only the 'condition' coord varies by branch)
    assembly = BehavioralAssembly(filtered_data['subject_answer'],
                                  coords={
                                      'subject': ('presentation', filtered_data['subject_id']),
                                      'subject_group': ('presentation', filtered_data['subject_group']),
                                      'visual_degrees': ('presentation', filtered_data['visual_degrees']),
                                      'image_duration': ('presentation', filtered_data['image_duration']),
                                      'is_correct': ('presentation', filtered_data['is_correct']),
                                      'subject_answer': ('presentation', filtered_data['subject_answer']),
                                      'condition': ('presentation', condition_values),
                                      'percentage_elements': ('presentation', filtered_data['percentage_elements']),
                                      'stimulus_id': ('presentation', filtered_data['stimulus_id'].astype(int)),
                                      'truth': ('presentation', filtered_data['correct_answer'])
                                  },
                                  dims=['presentation']
                                  )

    # give the assembly an identifier name matching the registry keys in the plugin's __init__.py
    if which_composite is not None:
        assembly.name = f'Scialom2024_{which_composite}-all'
    elif subject_group in ('phosphenes', 'segments'):
        assembly.name = f'Scialom2024_{subject_group}-{percentage_elements}'
    else:
        assembly.name = f'Scialom2024_{subject_group}'
    return assembly
105
+
106
+
107
if __name__ == '__main__':
    data_path = Path(r'../Data_Results_experiment.csv')
    for subject_group in SUBJECT_GROUPS:
        percentage_elements = PERCENTAGE_ELEMENTS[subject_group]

        # Per-group expectations: rgb/contours were shown to 50 subjects (48 stimuli each),
        # the '-all' composites pool 11 conditions for 25 subjects, all others are a single
        # condition for 25 subjects.
        if subject_group in ('rgb', 'contours'):
            num_dims, num_subjects, which_composite = 2400, 50, None
        elif subject_group in ('phosphenes-all', 'segments-all'):
            num_dims, num_subjects = 13200, 25
            which_composite = subject_group[:-len('-all')]
        else:
            num_dims, num_subjects, which_composite = 1200, 25, None

        assembly = collect_scialom_behavioral_assembly(data_path, subject_group, percentage_elements,
                                                       which_composite=which_composite)

        # make sure assembly dims are correct length
        assert len(assembly['presentation']) == num_dims

        # make sure every assembly coord is the correct length
        for coord in ('subject', 'subject_group', 'visual_degrees', 'image_duration',
                      'is_correct', 'subject_answer', 'truth', 'condition', 'stimulus_id'):
            assert len(assembly[coord]) == num_dims

        if subject_group in ('phosphenes-all', 'segments-all'):
            # all stimuli within-group shown to all subjects (11 conditions * 48 stimuli = 528 stimuli)
            assert len(np.unique(assembly['stimulus_id'].values)) == 528
        else:
            # make sure there are 48 unique images (shown 1 time for each of 25 or 50 subjects, total of
            # 25 * 48 = 1200 images shown or 50 * 48 = 2400 images shown)
            assert len(np.unique(assembly['stimulus_id'].values)) == 48

        # make sure there are the correct number of unique subjects:
        assert len(np.unique(assembly['subject'].values)) == num_subjects

        # make sure there are 12 unique object categories (ground truths):
        assert len(np.unique(assembly['truth'].values)) == 12
        assert len(np.unique(assembly['subject_answer'].values)) == 12

        # upload to S3
        prints = package_data_assembly(catalog_identifier=None,
                                       proto_data_assembly=assembly,
                                       assembly_identifier=assembly.name,
                                       stimulus_set_identifier=assembly.name,
                                       assembly_class_name="BehavioralAssembly",
                                       bucket_name="brainio-brainscore")

        print(prints)
@@ -0,0 +1,117 @@
1
+ from pathlib import Path
2
+ import csv
3
+
4
+ from brainio.stimuli import StimulusSet
5
+ from brainio.packaging import package_stimulus_set
6
+
7
+
8
'''
Dataset Meta Info

Reported in pixels:
    - image_height
    - image_width

Others:
    - num_channels (3 in the case of RGB and contours, 1 otherwise)
    - dataset (RGB, contours, phosphenes or segments stimuli)
    - object_id (a unique identifier of the specific exemplar of a given category. For each object category, there are
      4 objects)
    - category (the category of the object; one of:
        'Banana',
        'Beanie',
        'Binoculars',
        'Boot',
        'Bowl',
        'Cup',
        'Glasses',
        'Lamp',
        'Pan',
        'Sewing machine',
        'Shovel',
        'Truck'
      )
'''

# All stimulus-set identifiers to package. 'phosphenes-N'/'segments-N' vary the percentage of
# visual elements shown; the '-all' entries are composite sets pooling every condition of a mode
# (plus the rgb and contour baselines).
DATASETS = ['rgb', 'contours', 'phosphenes-12', 'phosphenes-16', 'phosphenes-21', 'phosphenes-27', 'phosphenes-35',
            'phosphenes-46', 'phosphenes-59', 'phosphenes-77', 'phosphenes-100', 'segments-12', 'segments-16',
            'segments-21', 'segments-27', 'segments-35', 'segments-46', 'segments-59', 'segments-77', 'segments-100',
            'phosphenes-all', 'segments-all']
# Maps each dataset identifier to the value used to filter the metadata's 'percentage_elements'
# column. Non-degraded sets map to sentinel strings ('RGB', 'contours', 'all') rather than numbers.
PERCENTAGE_ELEMENTS = {'rgb': 'RGB', 'contours': 'contours', 'phosphenes-12': 12, 'phosphenes-16': 16,
                       'phosphenes-21': 21, 'phosphenes-27': 27, 'phosphenes-35': 35, 'phosphenes-46': 46,
                       'phosphenes-59': 59, 'phosphenes-77': 77, 'phosphenes-100': 100, 'segments-12': 12,
                       'segments-16': 16, 'segments-21': 21, 'segments-27': 27, 'segments-35': 35, 'segments-46': 46,
                       'segments-59': 59, 'segments-77': 77, 'segments-100': 100, 'phosphenes-all': 'all',
                       'segments-all': 'all'}
46
+
47
+
48
def collect_scialom_stimulus_set(dataset, percentage_elements, stimuli_directory, metadata_filepath, which_composite):
    """Build a StimulusSet for one Scialom2024 dataset from the stimulus metadata CSV.

    :param dataset: dataset identifier, e.g. 'rgb', 'phosphenes-27', 'segments-all';
        only the part before the first '-' is used to match the metadata's
        'representation_mode' column and to name the resulting set
    :param percentage_elements: value matched against the metadata's 'percentage_elements'
        column (ignored when `which_composite` is given)
    :param stimuli_directory: directory containing the stimulus image files
    :param metadata_filepath: path to the stimulus metadata CSV
    :param which_composite: if not None ('phosphenes' or 'segments'), build the composite
        set containing every row of that mode plus the 'rgb' and 'contours' baselines
    :return: a StimulusSet with `stimulus_paths` and `name`/`identifier` set to
        'Scialom2024_<dataset>[-<percentage_elements>]'
    """
    stimuli = []
    stimulus_paths = {}
    base_dataset = dataset.split('-')[0]  # strip suffixes such as '-all'

    with open(metadata_filepath, 'r') as metadata:
        reader = csv.DictReader(metadata)
        for row in reader:
            mode = row['representation_mode'].lower()  # hoisted: used by every filter below
            if which_composite is not None:
                # composite set: keep the chosen mode plus the rgb/contours baselines
                if mode not in (which_composite, 'rgb', 'contours'):
                    continue
                condition = which_composite
            else:
                # single-condition set: match both element percentage and mode
                if row['percentage_elements'] != str(percentage_elements) or mode != base_dataset:
                    continue
                condition = str(row['percentage_elements'])

            # build metadata only for rows that passed the filter (the int() conversions
            # would otherwise run — and could raise — on rows we discard anyway)
            stimulus_id = str(row['stimulus_id'])
            stimuli.append({
                'image_height': int(row['image_height']),
                'image_width': int(row['image_width']),
                'num_channels': int(row['channel']),
                'dataset': str(row['representation_mode']),
                'object_id': int(row['object_id']),
                'stimulus_id': stimulus_id,
                'truth': str(row['category']),
                'percentage_elements': str(row['percentage_elements']),
                'condition': condition,
            })
            stimulus_paths[stimulus_id] = Path(f'{stimuli_directory}/{row["file_name"]}')

    stimuli = StimulusSet(stimuli)
    stimuli.stimulus_paths = stimulus_paths
    # degraded modes carry the condition in the identifier; rgb/contours do not
    if base_dataset in ('phosphenes', 'segments'):
        identifier = f'Scialom2024_{base_dataset}-{percentage_elements}'
    else:
        identifier = f'Scialom2024_{base_dataset}'
    stimuli.name = identifier
    stimuli.identifier = identifier

    return stimuli
91
+
92
+
93
if __name__ == '__main__':
    stimuli_directory = Path(r'../dataset')
    metadata_filepath = Path('../MetaData_Stimuli_experiment.csv')
    for dataset in DATASETS:
        # only the two '-all' composites request pooling; everything else is a single condition
        which_composite = {'phosphenes-all': 'phosphenes',
                           'segments-all': 'segments'}.get(dataset)
        percentage_elements = PERCENTAGE_ELEMENTS[dataset]
        stimuli = collect_scialom_stimulus_set(dataset, percentage_elements, stimuli_directory, metadata_filepath,
                                               which_composite=which_composite)

        # Ensure expected number of stimuli in datasets:
        # composites hold 11 conditions * 48 stimuli = 528, all others hold 48.
        expected_count = 48 if which_composite is None else 528
        assert len(stimuli) == expected_count

        # upload to S3
        prints = package_stimulus_set(catalog_name=None,
                                      proto_stimulus_set=stimuli,
                                      stimulus_set_identifier=stimuli.name,
                                      bucket_name="brainio-brainscore")
        print(prints)