brainscore_vision-2.1-py3-none-any.whl
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,93 @@
import itertools

import numpy as np

from brainscore_core.metrics import Metric, Score
from brainscore_vision.metric_helpers.transformations import apply_aggregate


class ErrorConsistency(Metric):
    """
    Computes the error consistency using :meth:`~brainscore.metrics.error_consistency.cohens_kappa`
    implemented in Geirhos et al., 2020 https://arxiv.org/abs/2006.16736
    """

    def __call__(self, source, target) -> Score:
        assert len(set(source['stimulus_id'].values)) == len(set(target['stimulus_id'].values))
        # https://github.com/bethgelab/model-vs-human/blob/745046c4d82ff884af618756bd6a5f47b6f36c45/modelvshuman/plotting/analyses.py#L161
        subject_scores = []
        for subject in self.extract_subjects(target):
            for condition in sorted(set(target['condition'].values)):
                this_source = source.sel(condition=condition)
                this_target = target.sel(subject=subject, condition=condition)
                subject_score = self.compare_single_subject(this_source, this_target)
                subject_score = subject_score.expand_dims('subject').expand_dims('condition')
                subject_score['subject'] = [subject]
                subject_score['condition'] = [condition]
                subject_scores.append(subject_score)
        subject_scores = Score.merge(*subject_scores)
        subject_scores = apply_aggregate(aggregate_fnc=self.aggregate, values=subject_scores)
        return subject_scores

    @classmethod
    def aggregate(cls, scores):
        mean = scores.mean('condition').mean('subject')
        # This deviates from the original paper, which did not deal with scores < 0
        # (neither for a single subject/condition, nor in the aggregate).
        # We here avoid negative scores, so that all scores comply with 0 <= score <= 1.
        score = abs(mean)
        # estimate error; but note that the original paper did not have error estimates
        score.attrs['error'] = scores.std(['condition', 'subject'])
        return score

    def ceiling(self, assembly):
        """
        Computes subject consistency by comparing each subject to all other subjects.
        """
        subjects = self.extract_subjects(assembly)
        subject_scores = []
        for subject1, subject2 in itertools.combinations(subjects, 2):
            for condition in sorted(set(assembly['condition'].values)):
                subject1_assembly = assembly.sel(subject=subject1, condition=condition)
                subject2_assembly = assembly.sel(subject=subject2, condition=condition)
                pairwise_score = self.compare_single_subject(subject1_assembly, subject2_assembly)
                pairwise_score = pairwise_score.expand_dims('subject').expand_dims('condition')
                pairwise_score['subject_left'] = 'subject', [subject1]
                pairwise_score['subject_right'] = 'subject', [subject2]
                pairwise_score['condition'] = [condition]
                subject_scores.append(Score(pairwise_score))
        subject_scores = Score.merge(*subject_scores)
        subject_scores = apply_aggregate(aggregate_fnc=self.aggregate, values=subject_scores)
        return subject_scores

    def extract_subjects(self, assembly):
        return list(sorted(set(assembly['subject'].values)))

    def compare_single_subject(self, source, target):
        assert len(source['stimulus_id']) == len(target['stimulus_id'])
        source = source.sortby('stimulus_id')
        target = target.sortby('stimulus_id')
        assert all(source['stimulus_id'].values == target['stimulus_id'].values)

        correct_source = source.values == source['truth'].values
        correct_target = target.values == target['truth'].values
        accuracy_source = np.mean(correct_source)
        accuracy_target = np.mean(correct_target)

        expected_consistency = accuracy_source * accuracy_target + (1 - accuracy_source) * (1 - accuracy_target)
        observed_consistency = (correct_source == correct_target).sum() / len(target)
        error_consistency = cohens_kappa(expected_consistency=expected_consistency,
                                         observed_consistency=observed_consistency)
        return Score(error_consistency)


def cohens_kappa(expected_consistency, observed_consistency):
    """
    Computes the error consistency using Cohen's Kappa.
    Cohen, 1960 https://doi.org/10.1177%2F001316446002000104
    """
    # https://github.com/bethgelab/model-vs-human/blob/745046c4d82ff884af618756bd6a5f47b6f36c45/modelvshuman/plotting/analyses.py#L147-L158
    assert 0.0 <= expected_consistency <= 1.0
    assert 0.0 <= observed_consistency <= 1.0
    return (observed_consistency - expected_consistency) / (1.0 - expected_consistency)
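To make the kappa normalization above concrete, here is a minimal worked sketch with hypothetical numbers (not data from the package): two observers who are each 80% accurate agree by chance on 0.8 * 0.8 + 0.2 * 0.2 = 0.68 of trials, so an observed agreement of 0.84 yields kappa = (0.84 - 0.68) / (1 - 0.68) = 0.5.

expected = 0.8 * 0.8 + 0.2 * 0.2  # chance agreement of two independent 80%-accurate observers
observed = 0.84                   # hypothetical observed agreement
kappa = (observed - expected) / (1.0 - expected)
assert round(kappa, 2) == 0.5     # agreement well beyond chance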
@@ -0,0 +1,39 @@
import numpy as np
from pytest import approx

from brainio.assemblies import BehavioralAssembly
from brainscore_vision import load_metric


def test_score():
    assembly = _make_data()
    metric = load_metric('error_consistency')
    score = metric(assembly.sel(subject='A'), assembly)
    assert score == approx(0.2)


def test_has_error():
    assembly = _make_data()
    metric = load_metric('error_consistency')
    score = metric(assembly.sel(subject='A'), assembly)
    assert hasattr(score, 'error')


def test_raw_subjects():
    assembly = _make_data()
    metric = load_metric('error_consistency')
    score = metric(assembly.sel(subject='A'), assembly)
    subject_scores = score.raw.mean('condition')
    assert subject_scores.sel(subject='A') == 1


def _make_data():
    return BehavioralAssembly(['dog', 'cat', 'chair', 'cat', 'dog', 'dog', 'dog', 'dog', 'chair',  # subject A
                               'cat', 'cat', 'chair', 'cat', 'dog', 'cat', 'chair', 'cat', 'cat',  # subject B
                               'dog', 'cat', 'chair', 'dog', 'cat', 'chair', 'dog', 'cat', 'chair'  # subject C
                               ],
                              coords={'stimulus_id': ('presentation', np.resize(np.arange(9), 9 * 3)),
                                      'truth': ('presentation', np.resize(['dog', 'cat', 'chair'], 9 * 3)),
                                      'condition': ('presentation', np.resize([1, 1, 1, 2, 2, 2, 3, 3, 3], 9 * 3)),
                                      'subject': ('presentation', ['A'] * 9 + ['B'] * 9 + ['C'] * 9)},
                              dims=['presentation'])
@@ -0,0 +1,16 @@
from brainscore_vision import metric_registry
from .metric import I1, I2n

metric_registry['i1'] = I1
metric_registry['i2n'] = I2n

BIBTEX = """@article{rajalingham2018large,
  title={Large-scale, high-resolution comparison of the core visual object recognition behavior of humans, monkeys, and state-of-the-art deep artificial neural networks},
  author={Rajalingham, Rishi and Issa, Elias B and Bashivan, Pouya and Kar, Kohitij and Schmidt, Kailyn and DiCarlo, James J},
  journal={Journal of Neuroscience},
  volume={38},
  number={33},
  pages={7255--7269},
  year={2018},
  publisher={Soc Neuroscience}
}"""
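Once registered, these metrics are resolved by name through the plugin registry; a minimal usage sketch, where `source_probabilities` and `target` are hypothetical, pre-loaded BehavioralAssembly objects:

from brainscore_vision import load_metric

i2n = load_metric('i2n')  # resolves 'i2n' through metric_registry to the I2n factory above
# score = i2n(source_probabilities, target)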
@@ -0,0 +1,299 @@
import itertools
import logging
from collections import Counter

import numpy as np
import scipy.stats
import xarray
from numpy.random.mtrand import RandomState
from scipy.stats import pearsonr

from brainio.assemblies import walk_coords, DataAssembly
from brainscore_core.metrics import Metric, Score
from brainscore_vision.metric_helpers.transformations import apply_aggregate
from brainscore_vision.utils import fullname


# only the implementation for i2n has been verified.

def I1(*args, **kwargs):
    return _I(*args, collapse_distractors=True, normalize=False, **kwargs)


def I1n(*args, **kwargs):
    return _I(*args, collapse_distractors=True, normalize=True, **kwargs)


def I2(*args, **kwargs):
    return _I(*args, collapse_distractors=False, normalize=False, **kwargs)


def I2n(*args, **kwargs):
    return _I(*args, collapse_distractors=False, normalize=True, **kwargs)


def O1(*args, **kwargs):
    return _O(*args, collapse_distractors=True, normalize=False, **kwargs)


def O2(*args, **kwargs):
    return _O(*args, collapse_distractors=False, normalize=False, **kwargs)


def _o2(assembly):
    i2n = I2n()
    false_alarm_rates = i2n.target_distractor_scores(assembly)
    false_alarm_rates_object = false_alarm_rates.groupby('truth').mean('presentation')
    false_alarm_rates_object = false_alarm_rates_object.rename({'truth': 'task_left', 'choice': 'task_right'})
    # hit rates are one minus the flipped false alarm rates, e.g. HR(dog, cat) = 1 - FAR(cat, dog)
    hit_rates_object = 1 - false_alarm_rates_object.rename({'task_left': 'task_right', 'task_right': 'task_left'})
    dprime_false_alarms_rates_object = xarray.apply_ufunc(i2n.z_score, false_alarm_rates_object)
    dprime_hit_rates_object = xarray.apply_ufunc(i2n.z_score, hit_rates_object)
    o2 = dprime_hit_rates_object - dprime_false_alarms_rates_object
    return o2


class _Behavior_Metric(Metric):
    """
    Rajalingham & Issa et al., 2018 http://www.jneurosci.org/content/early/2018/07/13/JNEUROSCI.0388-18.2018
    modified by Schrimpf & Kubilius et al., 2018 https://www.biorxiv.org/content/early/2018/09/05/407007:
    - Rajalingham et al. generated model trials by using different train-test splits.
      This implementation fixes the train-test split, and thus computes a fixed response matrix without trials.
    - for computing dprime scores, Rajalingham et al. computed the false-alarms rate across the flat vector of all
      distractor images. This implementation computes the false-alarms rate per object, and then takes the mean.
    """

    def __init__(self, collapse_distractors, normalize=False, repetitions=2):
        super().__init__()
        self._collapse_distractors = collapse_distractors
        self._normalize = normalize
        self._repetitions = repetitions
        self._logger = logging.getLogger(fullname(self))

    def __call__(self, source_probabilities, target) -> Score:
        return self._repeat(lambda random_state:
                            self._call_single(source_probabilities, target, random_state=random_state))

    def _call_single(self, source_probabilities, target, random_state):
        self.add_source_meta(source_probabilities, target)
        source_response_matrix = self.target_distractor_scores(source_probabilities)
        source_response_matrix = self.dprimes(source_response_matrix)
        if self._collapse_distractors:
            source_response_matrix = self.collapse_distractors(source_response_matrix)

        target_half = self.generate_halves(target, random_state=random_state)[0]
        target_response_matrix = self.build_response_matrix_from_responses(target_half)
        target_response_matrix = self.dprimes(target_response_matrix)
        if self._collapse_distractors:
            target_response_matrix = self.collapse_distractors(target_response_matrix)

        correlation = self.correlate(source_response_matrix, target_response_matrix,
                                     collapse_distractors=self._collapse_distractors)
        return correlation

    def ceiling(self, assembly, skipna=False):
        return self._repeat(lambda random_state:
                            self.compute_ceiling(assembly, random_state=random_state, skipna=skipna))

    def compute_ceiling(self, assembly, random_state, skipna=False):
        dprime_halves = []
        for half in self.generate_halves(assembly, random_state=random_state):
            half = self.build_response_matrix_from_responses(half)
            half = self.dprimes(half)
            if self._collapse_distractors:
                half = self.collapse_distractors(half)
            dprime_halves.append(half)
        return self.correlate(*dprime_halves, skipna=skipna, collapse_distractors=self._collapse_distractors)

    def build_response_matrix_from_responses(self, responses):
        num_choices = [(image_id, choice) for image_id, choice in
                       zip(responses['stimulus_id'].values, responses.values)]
        num_choices = Counter(num_choices)
        num_objects = [[(image_id, sample_obj), (image_id, dist_obj)] for image_id, sample_obj, dist_obj in zip(
            responses['stimulus_id'].values, responses['sample_obj'].values, responses['dist_obj'].values)]
        num_objects = Counter(itertools.chain(*num_objects))

        choices = np.unique(responses)
        image_ids, indices = np.unique(responses['stimulus_id'], return_index=True)
        truths = responses['truth'].values[indices]
        image_dim = responses['stimulus_id'].dims
        coords = {**{coord: (dims, value) for coord, dims, value in walk_coords(responses)},
                  **{'choice': ('choice', choices)}}
        coords = {coord: (dims, value if dims != image_dim else value[indices])  # align image_dim coords with indices
                  for coord, (dims, value) in coords.items()}
        response_matrix = np.zeros((len(image_ids), len(choices)))
        for (image_index, image_id), (choice_index, choice) in itertools.product(
                enumerate(image_ids), enumerate(choices)):
            if truths[image_index] == choice:  # object == choice, ignore
                p = np.nan
            else:
                # divide by number of times where object was one of the two choices (target or distractor)
                p = (num_choices[(image_id, choice)] / num_objects[(image_id, choice)]) \
                    if num_objects[(image_id, choice)] > 0 else np.nan
            response_matrix[image_index, choice_index] = p
        response_matrix = DataAssembly(response_matrix, coords=coords, dims=responses.dims + ('choice',))
        return response_matrix

    def dprimes(self, response_matrix, cap=5):
        dprime_scores = self.dprime(response_matrix)
        dprime_scores_clipped = dprime_scores.clip(-cap, cap)
        dprime_scores_clipped = type(dprime_scores)(dprime_scores_clipped)  # make sure type is preserved
        if not self._normalize:
            return dprime_scores_clipped
        else:
            dprime_scores_normalized = self.subtract_mean(dprime_scores_clipped)
            return dprime_scores_normalized

    def target_distractor_scores(self, object_probabilities):
        cached_object_probabilities = self._build_index(object_probabilities, ['stimulus_id', 'choice'])

        def apply(p_choice, stimulus_id, truth, choice, **_):
            if truth == choice:  # object == choice, ignore
                return np.nan
            # probability that something else was chosen rather than object (p_choice == p_distractor after above check)
            p_object = cached_object_probabilities[(stimulus_id, truth)]
            p = p_choice / (p_choice + p_object)
            return p

        result = object_probabilities.multi_dim_apply(['stimulus_id', 'choice'], apply)
        return result

    def z_score(self, value):
        return scipy.stats.norm.ppf(value)

    def subtract_mean(self, scores):
        result = scores.multi_dim_apply(['truth', 'choice'], lambda group, **_: group - np.nanmean(group))
        return result

    @classmethod
    def aggregate(cls, scores):
        score = scores.mean('split')
        score.attrs['error'] = scores.std('split')
        return score

    def generate_halves(self, assembly, random_state):
        indices = list(range(len(assembly)))
        random_state.shuffle(indices)
        halves = assembly[indices[:int(len(indices) / 2)]], assembly[indices[int(len(indices) / 2):]]
        return halves

    def _build_index(self, assembly, coords):
        np.testing.assert_array_equal(list(itertools.chain(*[assembly[coord].dims for coord in coords])), assembly.dims)
        aligned_coords = itertools.product(*[assembly[coord].values for coord in coords])
        result = {}
        for coord_values, value in zip(aligned_coords, assembly.values.flatten()):
            if coord_values in result:  # if there are duplicates, make it a list
                previous_value = result[coord_values]
                previous_value = (previous_value if isinstance(previous_value, list) else [previous_value])
                result[coord_values] = previous_value + [value]
            else:
                result[coord_values] = value
        return result

    def _repeat(self, func):
        random_state = self._initialize_random_state()
        repetitions = list(range(self._repetitions))
        scores = [func(random_state=random_state) for repetition in repetitions]
        score = Score(scores, coords={'split': repetitions}, dims=['split'])
        return apply_aggregate(self.aggregate, score)

    def _initialize_random_state(self):
        return RandomState(seed=0)  # fix the seed here so that the same halves are produced for score and ceiling

    def add_source_meta(self, source_probabilities, target):
        image_meta = {image_id: meta_value for image_id, meta_value in
                      zip(target['stimulus_id'].values, target['truth'].values)}
        meta_values = [image_meta[image_id] for image_id in source_probabilities['stimulus_id'].values]
        source_probabilities['truth'] = 'presentation', meta_values


class _I(_Behavior_Metric):
    """
    Rajalingham & Issa et al., 2018 http://www.jneurosci.org/content/early/2018/07/13/JNEUROSCI.0388-18.2018
    modified by Schrimpf & Kubilius et al., 2018 https://www.biorxiv.org/content/early/2018/09/05/407007:
    - Rajalingham et al. generated model trials by using different train-test splits.
      This implementation fixes the train-test split, and thus computes a fixed response matrix without trials.
    - for computing dprime scores, Rajalingham et al. computed the false-alarms rate across the flat vector of all
      distractor images. This implementation computes the false-alarms rate per object, and then takes the mean.
    """

    def collapse_distractors(self, response_matrix):
        return response_matrix.mean(dim='choice', skipna=True)

    def dprime(self, response_matrix):
        truth_choice_values = self._build_index(response_matrix, ['truth', 'choice'])

        def apply(false_alarms_rate_images, choice, truth, **_):
            hit_rate = 1 - false_alarms_rate_images
            inverse_choice = truth_choice_values[(choice, truth)]
            false_alarms_rate_objects = np.nanmean(inverse_choice)
            dprime = self.z_score(hit_rate) - self.z_score(false_alarms_rate_objects)
            return dprime

        result = response_matrix.multi_dim_apply(['stimulus_id', 'choice'], apply)
        return result

    @classmethod
    def correlate(cls, source_response_matrix, target_response_matrix, skipna=False, collapse_distractors=False):
        # align
        if collapse_distractors:
            source_response_matrix = source_response_matrix.sortby('stimulus_id')
            target_response_matrix = target_response_matrix.sortby('stimulus_id')
        else:
            source_response_matrix = source_response_matrix.sortby('stimulus_id').sortby('choice')
            target_response_matrix = target_response_matrix.sortby('stimulus_id').sortby('choice')
            assert all(source_response_matrix['choice'].values == target_response_matrix['choice'].values)
        assert all(source_response_matrix['stimulus_id'].values == target_response_matrix['stimulus_id'].values)
        # flatten and mask out NaNs
        source, target = source_response_matrix.values.flatten(), target_response_matrix.values.flatten()
        non_nan = ~np.isnan(target)
        non_nan = np.logical_and(non_nan, (~np.isnan(source) if skipna else 1))
        source, target = source[non_nan], target[non_nan]
        assert not any(np.isnan(source))
        correlation, p = pearsonr(source, target)
        return correlation


class _O(_Behavior_Metric):
    """
    Rajalingham & Issa et al., 2018 http://www.jneurosci.org/content/early/2018/07/13/JNEUROSCI.0388-18.2018
    modified by Schrimpf & Kubilius et al., 2018 https://www.biorxiv.org/content/early/2018/09/05/407007:
    - Rajalingham et al. generated model trials by using different train-test splits.
      This implementation fixes the train-test split, and thus computes a fixed response matrix without trials.
    - for computing dprime scores, Rajalingham et al. computed the false-alarms rate across the flat vector of all
      distractor images. This implementation computes the false-alarms rate per object, and then takes the mean.
    """

    def collapse_distractors(self, response_matrix):
        return response_matrix.mean(dim='task_left', skipna=True)

    def dprime(self, response_matrix):
        false_alarm_rates_object = response_matrix.groupby('truth').mean('presentation')
        false_alarm_rates_object = false_alarm_rates_object.rename({'truth': 'task_left', 'choice': 'task_right'})
        hit_rates_object = 1 - false_alarm_rates_object.rename({'task_left': 'task_right', 'task_right': 'task_left'})

        dprime_false_alarms_rates_object = xarray.apply_ufunc(self.z_score, false_alarm_rates_object)
        dprime_hit_rates_object = xarray.apply_ufunc(self.z_score, hit_rates_object)

        result = dprime_hit_rates_object - dprime_false_alarms_rates_object
        return result

    @classmethod
    def correlate(cls, source_response_matrix, target_response_matrix, skipna=False, collapse_distractors=False):
        # align
        if collapse_distractors:
            source_response_matrix = source_response_matrix.sortby('task_right')
            target_response_matrix = target_response_matrix.sortby('task_right')
        else:
            source_response_matrix = source_response_matrix.sortby('task_right').sortby('task_left')
            target_response_matrix = target_response_matrix.sortby('task_right').sortby('task_left')
            assert all(source_response_matrix['task_left'].values == target_response_matrix['task_left'].values)
        assert all(source_response_matrix['task_right'].values == target_response_matrix['task_right'].values)
        # flatten and mask out NaNs
        source, target = source_response_matrix.values.flatten(), target_response_matrix.values.flatten()
        non_nan = ~np.isnan(target)
        non_nan = np.logical_and(non_nan, (~np.isnan(source) if skipna else 1))
        source, target = source[non_nan], target[non_nan]
        assert not any(np.isnan(source))
        correlation, p = pearsonr(source, target)
        return correlation
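For orientation, a minimal sketch of the d-prime arithmetic underlying `dprime` above, with hypothetical hit and false-alarm rates (the per-object averaging, clipping, and NaN handling are omitted):

import scipy.stats

hit_rate, false_alarm_rate = 0.9, 0.2       # hypothetical rates
z = scipy.stats.norm.ppf                    # the z_score transform used above
dprime = z(hit_rate) - z(false_alarm_rate)  # ~= 1.28 - (-0.84) ~= 2.12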
@@ -0,0 +1,36 @@
from pathlib import Path

import pytest
from pytest import approx

from brainio.assemblies import BehavioralAssembly
from brainscore_vision import load_metric
from brainscore_vision.benchmarks.rajalingham2018.benchmarks.benchmark import load_assembly


@pytest.mark.private_access
class TestI2N:
    @pytest.mark.parametrize(['model', 'expected_score'], [
        ('alexnet', .253),
        ('resnet34', .37787),
        ('resnet18', .3638),
    ])
    def test_model(self, model, expected_score):
        # assemblies
        objectome = load_assembly()
        probabilities = Path(__file__).parent / 'test_resources' / f'{model}-probabilities.nc'
        probabilities = BehavioralAssembly.from_files(
            probabilities,
            stimulus_set_identifier=objectome.attrs['stimulus_set_identifier'],
            stimulus_set=objectome.attrs['stimulus_set'])
        # metric
        i2n = load_metric('i2n')
        score = i2n(probabilities, objectome)
        assert score == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score}"

    def test_ceiling(self):
        objectome = load_assembly()
        i2n = load_metric('i2n')
        ceiling = i2n.ceiling(objectome)
        assert ceiling == approx(.4786, abs=.0064)
        assert ceiling.attrs['error'] == approx(.00537, abs=.0015)
Binary file
Binary file
Binary file
@@ -0,0 +1,8 @@
from brainscore_vision import metric_registry
from .ceiling import InternalConsistency

from brainscore_vision.metric_helpers.temporal import PerTime


metric_registry['internal_consistency'] = InternalConsistency
metric_registry['internal_consistency_temporal'] = lambda *args, **kwargs: PerTime(InternalConsistency(*args, **kwargs))
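As with the behavioral metrics, the ceilings are resolved by name through the registry; a minimal sketch, where `neural_assembly` is a hypothetical assembly carrying a 'repetition' coordinate:

from brainscore_vision import load_metric

ceiler = load_metric('internal_consistency')  # split-half consistency over repetitions
# ceiling = ceiler(neural_assembly)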
@@ -0,0 +1,127 @@
import scipy.stats
import xarray as xr
from tqdm import tqdm

from brainio.assemblies import walk_coords, DataAssembly
from brainscore_core import Metric, Score
from brainscore_vision.metric_helpers import Defaults as XarrayDefaults
from brainscore_vision.metric_helpers.transformations import CrossValidationSingle
from brainscore_vision.metric_helpers.xarray_utils import XarrayCorrelation
from brainscore_vision.metrics import Ceiling


class _SplitHalvesConsistency(Ceiling):
    """
    Computes the consistency within an assembly with repetitions,
    by splitting the assembly in halves and computing consistency between the two halves.
    """

    class Defaults:
        split_coord = 'repetition'

    def __init__(self, consistency_metric, aggregate=None,
                 split_coord=Defaults.split_coord, cross_validation_kwargs=None):
        correction = SpearmanBrownCorrection()
        self._consistency = self.SplitHalfWrapper(split_coord=split_coord,
                                                  consistency_metric=consistency_metric, correction=correction)
        self._aggregate = aggregate
        cross_validation_defaults = dict(train_size=0.5, split_coord=split_coord,
                                         stratification_coord=None, unique_split_values=True)
        cross_validation_kwargs = {**cross_validation_defaults, **(cross_validation_kwargs or {})}
        self._cross_validation = CrossValidationSingle(**cross_validation_kwargs)

    def __call__(self, assembly):
        return self._cross_validation(assembly, apply=self._consistency, aggregate=self._aggregate)

    class SplitHalfWrapper:
        def __init__(self, split_coord, consistency_metric: Metric, correction):
            self._split_coord = split_coord
            self._consistency_metric = consistency_metric
            self._correction = correction

        def __call__(self, half1, half2):
            half1, half2 = self._average_repetitions(half1), self._average_repetitions(half2)
            consistency = self._consistency_metric(half1, half2)
            corrected_consistency = self._correction(consistency, n=2)
            return corrected_consistency

        def _average_repetitions(self, assembly):
            repetition_dims = assembly[self._split_coord].dims
            nonrepetition_coords = [coord for coord, dims, values in walk_coords(assembly)
                                    if dims == repetition_dims and coord != self._split_coord]
            average = assembly.multi_groupby(nonrepetition_coords).mean(dim=repetition_dims)
            return average


class PearsonCorrelation:
    """
    Computes the Pearson r between two halves of an assembly.
    """

    def __init__(self, stimulus_coord=XarrayDefaults.stimulus_coord,
                 neuroid_dim=XarrayDefaults.neuroid_dim, neuroid_coord=XarrayDefaults.neuroid_coord):
        correlation = scipy.stats.pearsonr
        self._correlation = XarrayCorrelation(correlation, correlation_coord=stimulus_coord,
                                              neuroid_coord=neuroid_coord)
        self._neuroid_dim = neuroid_dim

    def __call__(self, half1, half2):
        return self._correlation(half1, half2)

    def aggregate(self, scores):
        return scores.median(dim=self._neuroid_dim)


class InternalConsistency(Ceiling):
    def __init__(self,
                 consistency_metric: Metric = None,
                 aggregate=None,
                 split_coord=_SplitHalvesConsistency.Defaults.split_coord):
        """
        Creates a class to estimate the ceiling for a given assembly, based on split-half consistencies.

        :param consistency_metric: The metric to compare two halves. :class:`~.PearsonCorrelation` by default.
        :param aggregate: An optional function to aggregate the output of the `consistency_metric`.
        :param split_coord: Over which coordinate to split to obtain the two halves.
        """
        if not consistency_metric:
            consistency_metric = PearsonCorrelation(
                stimulus_coord=XarrayDefaults.stimulus_coord, neuroid_dim=XarrayDefaults.neuroid_dim,
                neuroid_coord=XarrayDefaults.neuroid_coord)
            aggregate = consistency_metric.aggregate
        self._consistency = _SplitHalvesConsistency(consistency_metric=consistency_metric,
                                                    aggregate=aggregate,
                                                    split_coord=split_coord)

    def __call__(self, assembly: DataAssembly) -> Score:
        return self._consistency(assembly)


class SpearmanBrownCorrection:
    """
    Applies Spearman-Brown correction to all passed values.
    """

    def __call__(self, correlations, n):
        return xr.apply_ufunc(lambda correlation: self.correct(correlation, n), correlations)

    def correct(self, correlation, n):
        return n * correlation / (1 + (n - 1) * correlation)


class TemporalCeiling:
    def __init__(self, ceiling):
        """
        :param ceiling: the ceiling to use per time-bin
        """
        self.ceiling = ceiling

    def __call__(self, assembly: DataAssembly) -> Score:
        ceilings = []
        for time_bin in tqdm(assembly['time_bin'].values, desc='time-ceiling'):
            ceiling = self.ceiling(assembly.sel(time_bin=time_bin))
            ceiling = ceiling.expand_dims('time_bin')
            ceiling['time_bin'] = [str(time_bin)]
            ceilings.append(ceiling)
        ceiling = Score.merge(*ceilings)
        return ceiling
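The Spearman-Brown step corrects for each half containing only half the repetitions; a worked sketch with a hypothetical split-half correlation:

n, r = 2, 0.5                        # n=2 halves; hypothetical split-half correlation
corrected = n * r / (1 + (n - 1) * r)
assert round(corrected, 2) == 0.67   # predicted reliability of the full assembly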
@@ -0,0 +1 @@
scipy