brainscore-vision 2.1__py3-none-any.whl
Sign up to get free protection for your applications and access to all of the features.
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,32 @@
|
|
1
|
+
import numpy as np
|
2
|
+
from pytest import approx
|
3
|
+
|
4
|
+
from brainio.assemblies import BehavioralAssembly
|
5
|
+
from brainscore_vision.benchmarks.imagenet import Imagenet2012
|
6
|
+
from brainscore_vision.model_interface import BrainModel
|
7
|
+
|
8
|
+
|
9
|
+
class TestImagenet2012:
    def test_groundtruth(self):
        """A candidate that answers with the ground-truth synsets must score a perfect 1."""
        benchmark = Imagenet2012()
        source = benchmark._stimulus_set

        class GroundTruth(BrainModel):
            def start_task(self, task, fitting_stimuli):
                assert task == BrainModel.Task.label
                assert fitting_stimuli == 'imagenet'  # shortcut

            def look_at(self, stimuli, number_of_trials=1):
                # align the source stimulus set to the presented stimuli via a sorted lookup
                # NOTE(review): the positions come from the sorted view but index the
                # unsorted `source` — this assumes `source` is ordered by stimulus_id; verify
                source_ids = source['stimulus_id'].values
                presented_ids = stimuli['stimulus_id'].values
                sorted_ids = source_ids[np.argsort(source_ids)]
                positions = np.searchsorted(sorted_ids, presented_ids)
                aligned_source = source.loc[positions]
                labels = aligned_source['synset'].values
                return BehavioralAssembly([labels], coords={
                    **{column: ('presentation', aligned_source[column].values)
                       for column in aligned_source.columns},
                    **{'choice': ('choice', ['dummy'])}}, dims=['choice', 'presentation'])

        candidate = GroundTruth()
        score = benchmark(candidate)
        assert score == approx(1)
|
@@ -0,0 +1,7 @@
|
|
1
|
+
from brainscore_vision import benchmark_registry
|
2
|
+
from .benchmark import Imagenet_C_Noise, Imagenet_C_Blur, Imagenet_C_Weather, Imagenet_C_Digital
|
3
|
+
|
4
|
+
# register the four category-level ImageNet-C benchmarks under their public identifiers
benchmark_registry.update({
    'ImageNet-C-noise-top1': Imagenet_C_Noise,
    'ImageNet-C-blur-top1': Imagenet_C_Blur,
    'ImageNet-C-weather-top1': Imagenet_C_Weather,
    'ImageNet-C-digital-top1': Imagenet_C_Digital,
})
|
@@ -0,0 +1,204 @@
|
|
1
|
+
import logging
|
2
|
+
import os
|
3
|
+
from pathlib import Path
|
4
|
+
|
5
|
+
import numpy as np
|
6
|
+
import pandas as pd
|
7
|
+
import xarray as xr
|
8
|
+
|
9
|
+
from brainio.fetch import StimulusSetLoader
|
10
|
+
from brainio.stimuli import StimulusSet
|
11
|
+
from brainscore_core import Score
|
12
|
+
from brainscore_vision import load_stimulus_set, load_metric
|
13
|
+
from brainscore_vision.benchmarks import BenchmarkBase
|
14
|
+
from brainscore_vision.benchmarks.imagenet.benchmark import NUMBER_OF_TRIALS
|
15
|
+
from brainscore_vision.model_interface import BrainModel
|
16
|
+
|
17
|
+
_logger = logging.getLogger(__name__)
|
18
|
+
LOCAL_STIMULUS_DIRECTORY = '/braintree/data2/active/common/imagenet-c-brainscore-stimuli/'
|
19
|
+
|
20
|
+
BIBTEX = """@ARTICLE{Hendrycks2019-di,
|
21
|
+
title = "Benchmarking Neural Network Robustness to Common Corruptions
|
22
|
+
and Perturbations",
|
23
|
+
author = "Hendrycks, Dan and Dietterich, Thomas",
|
24
|
+
abstract = "In this paper we establish rigorous benchmarks for image
|
25
|
+
classifier robustness. Our first benchmark, ImageNet-C,
|
26
|
+
standardizes and expands the corruption robustness topic,
|
27
|
+
while showing which classifiers are preferable in
|
28
|
+
safety-critical applications. Then we propose a new dataset
|
29
|
+
called ImageNet-P which enables researchers to benchmark a
|
30
|
+
classifier's robustness to common perturbations. Unlike
|
31
|
+
recent robustness research, this benchmark evaluates
|
32
|
+
performance on common corruptions and perturbations not
|
33
|
+
worst-case adversarial perturbations. We find that there are
|
34
|
+
negligible changes in relative corruption robustness from
|
35
|
+
AlexNet classifiers to ResNet classifiers. Afterward we
|
36
|
+
discover ways to enhance corruption and perturbation
|
37
|
+
robustness. We even find that a bypassed adversarial defense
|
38
|
+
provides substantial common perturbation robustness.
|
39
|
+
Together our benchmarks may aid future work toward networks
|
40
|
+
that robustly generalize.",
|
41
|
+
month = mar,
|
42
|
+
year = 2019,
|
43
|
+
archivePrefix = "arXiv",
|
44
|
+
primaryClass = "cs.LG",
|
45
|
+
eprint = "1903.12261",
|
46
|
+
url = "https://arxiv.org/abs/1903.12261"
|
47
|
+
}"""
|
48
|
+
|
49
|
+
def Imagenet_C_Noise(sampling_factor=10):
    """ImageNet-C benchmark over the 'noise' corruption category."""
    return Imagenet_C_Category('noise', sampling_factor=sampling_factor)


def Imagenet_C_Blur(sampling_factor=10):
    """ImageNet-C benchmark over the 'blur' corruption category."""
    return Imagenet_C_Category('blur', sampling_factor=sampling_factor)


def Imagenet_C_Weather(sampling_factor=10):
    """ImageNet-C benchmark over the 'weather' corruption category."""
    return Imagenet_C_Category('weather', sampling_factor=sampling_factor)


def Imagenet_C_Digital(sampling_factor=10):
    """ImageNet-C benchmark over the 'digital' corruption category."""
    return Imagenet_C_Category('digital', sampling_factor=sampling_factor)
|
60
|
+
|
61
|
+
|
62
|
+
class Imagenet_C_Category(BenchmarkBase):
    """
    Runs all ImageNet-C benchmarks within one corruption category, e.g. for
    'noise': gaussian noise [1-5], shot noise [1-5], impulse noise [1-5].
    """
    # corruption category -> the individual corruption types it comprises
    noise_category_map = {
        'noise': ['gaussian_noise', 'shot_noise', 'impulse_noise'],
        'blur': ['glass_blur', 'motion_blur', 'zoom_blur', 'defocus_blur'],
        'weather': ['snow', 'frost', 'fog', 'brightness'],
        'digital': ['pixelate', 'contrast', 'elastic_transform', 'jpeg_compression'],
    }

    def __init__(self, noise_category, sampling_factor=10):
        """
        :param noise_category: one of the keys of `noise_category_map`
        :param sampling_factor: keep only every n-th stimulus when loading from the local copy
        """
        self.noise_category = noise_category
        self.stimulus_set_name = f'imagenet_c.{noise_category}'
        self.sampling_factor = sampling_factor
        self.stimulus_set = self.load_stimulus_set()
        self.noise_types = self.noise_category_map[noise_category]

        ceiling = Score(1)
        super(Imagenet_C_Category, self).__init__(identifier=f'ImageNet-C-{noise_category}-top1',
                                                  version=2,
                                                  ceiling_func=lambda: ceiling,
                                                  parent='Hendrycks2019-top1',
                                                  bibtex=BIBTEX)

    def load_stimulus_set(self):
        """
        ImageNet-C is quite large, and thus cumbersome to download each time the benchmark is run.
        Try loading a local copy first; fall back to the copy hosted through Brain-Score.
        """
        try:
            _logger.debug(f'Loading local Imagenet-C {self.noise_category}')
            category_path = os.path.join(
                LOCAL_STIMULUS_DIRECTORY,
                f'stimulus_imagenet_c_{self.noise_category}'
            )
            loader = SampledStimulusSetLoader(
                cls=StimulusSet,
                csv_path=os.path.join(category_path, f'stimulus_imagenet_c_{self.noise_category}.csv'),
                stimuli_directory=category_path,
                sampling_factor=self.sampling_factor
            )
            return loader.load()
        except OSError as error:
            _logger.debug(f'Excepted {error}. Attempting to access {self.stimulus_set_name} through Brainscore.')
            return load_stimulus_set(self.stimulus_set_name)

    def __call__(self, candidate):
        # score each corruption type in this category, then pool per-presentation scores
        per_type_scores = xr.concat(
            [Imagenet_C_Type(self.stimulus_set, noise_type, self.noise_category)(candidate)
             for noise_type in self.noise_types],
            dim='presentation')
        assert len(set(per_type_scores['noise_type'].values)) == len(self.noise_types)
        score = Score(np.mean(per_type_scores))
        score.attrs['error'] = np.std(per_type_scores)
        score.attrs[Score.RAW_VALUES_KEY] = per_type_scores
        return score
|
128
|
+
|
129
|
+
|
130
|
+
class Imagenet_C_Type(BenchmarkBase):
    """
    Runs one corruption type across all severity levels,
    e.g. gaussian noise at severities 1 through 5.
    """

    def __init__(self, stimulus_set, noise_type, noise_category):
        self.stimulus_set = stimulus_set[stimulus_set['noise_type'] == noise_type]
        self.noise_type = noise_type
        self.noise_category = noise_category
        ceiling = Score(1)
        super(Imagenet_C_Type, self).__init__(identifier=f'Hendrycks2019-{noise_category}-{noise_type}-top1',
                                              version=2,
                                              ceiling_func=lambda: ceiling,
                                              parent=f'Hendrycks2019-{noise_category}-top1',
                                              bibtex=BIBTEX)

    def __call__(self, candidate):
        # evaluate every severity level (1-5) and pool the per-presentation scores
        per_level_scores = [
            Imagenet_C_Individual(self.stimulus_set, severity, self.noise_type, self.noise_category)(candidate)
            for severity in range(1, 6)]
        return xr.concat(per_level_scores, dim='presentation')
|
151
|
+
|
152
|
+
|
153
|
+
class Imagenet_C_Individual(BenchmarkBase):
    """
    Runs an individual ImageNet-C benchmark, i.e. one corruption type at one
    severity level such as "gaussian_noise_1", scored as top-1 accuracy.
    """

    def __init__(self, stimulus_set, noise_level, noise_type, noise_category):
        """
        :param stimulus_set: stimuli of one corruption type (all severity levels)
        :param noise_level: corruption severity, 1 (mild) through 5 (severe)
        :param noise_type: e.g. 'gaussian_noise'
        :param noise_category: e.g. 'noise'
        """
        self.stimulus_set = stimulus_set[stimulus_set['noise_level'] == noise_level]
        self.noise_level = noise_level
        self.noise_type = noise_type
        self.benchmark_name = f'Hendrycks2019-{noise_category}-{noise_type}-{noise_level}-top1'
        self._similarity_metric = load_metric('accuracy')
        ceiling = Score(1)
        super(Imagenet_C_Individual, self).__init__(identifier=self.benchmark_name, version=2,
                                                    ceiling_func=lambda: ceiling,
                                                    parent=f'Hendrycks2019-{noise_category}-{noise_type}-top1',
                                                    bibtex=BIBTEX)

    def __call__(self, candidate):
        candidate.start_task(BrainModel.Task.label, 'imagenet')
        # withhold the ground-truth label column; keep the remaining columns in their
        # original order (fix: the previous `list(set(columns) - {'synset'})` produced a
        # nondeterministic column order across runs)
        visible_columns = [column for column in self.stimulus_set.columns if column != 'synset']
        stimulus_set = self.stimulus_set[visible_columns].copy().reset_index()  # do not show label
        stimulus_set.identifier = f'{self.benchmark_name}-{len(stimulus_set)}samples'
        predictions = candidate.look_at(stimulus_set, number_of_trials=NUMBER_OF_TRIALS)
        # sort both predictions and targets by filename so they line up row-for-row
        score = self._similarity_metric(
            predictions.sortby('filename'),
            self.stimulus_set.sort_values('filename')['synset'].values
        ).raw

        # tag every presentation with this benchmark's name so scores of multiple
        # severities/types can be concatenated and still be told apart
        score = score.assign_coords(
            name=('presentation', [f'{self.benchmark_name}' for _ in range(len(score.presentation))])
        )

        return score
|
186
|
+
|
187
|
+
|
188
|
+
class SampledStimulusSetLoader(StimulusSetLoader):
    """
    StimulusSetLoader that downsamples the stimulus set before loading,
    keeping every `sampling_factor`-th row of the csv.
    """

    def __init__(self, cls, csv_path, stimuli_directory, sampling_factor):
        super().__init__(cls, csv_path, stimuli_directory)
        self.sampling_factor = sampling_factor

    def load(self):
        subsampled = pd.read_csv(self.csv_path)[::self.sampling_factor]
        self.correct_stimulus_id_name(subsampled)
        stimulus_set = self.stimulus_set_class(subsampled)
        stimuli_root = Path(self.stimuli_directory)
        stimulus_set.stimulus_paths = {row['stimulus_id']: stimuli_root / row['filename']
                                       for _, row in stimulus_set.iterrows()}
        # make sure that all the stimulus files a loaded StimulusSet offers access to are actually available
        assert all(stimulus_path.is_file() for stimulus_path in stimulus_set.stimulus_paths.values())
        return stimulus_set
|
@@ -0,0 +1,57 @@
|
|
1
|
+
import numpy as np
|
2
|
+
import pytest
|
3
|
+
from pytest import approx
|
4
|
+
|
5
|
+
from brainio.assemblies import BehavioralAssembly
|
6
|
+
from brainscore_vision.benchmarks.imagenet_c import Imagenet_C_Noise, Imagenet_C_Blur, \
|
7
|
+
Imagenet_C_Weather, Imagenet_C_Digital
|
8
|
+
from brainscore_vision.model_interface import BrainModel
|
9
|
+
|
10
|
+
# downloads all ImageNet C benchmarks (50.3G) and runs with default downsampling by a factor of 10
@pytest.mark.slow
class TestImagenetC:
    def test_groundtruth(self):
        """A candidate that is always wrong must score 0 on every category."""
        benchmarks = [
            Imagenet_C_Noise(),
            Imagenet_C_Blur(),
            Imagenet_C_Weather(),
            Imagenet_C_Digital(),
        ]

        class AlwaysWrong(BrainModel):
            # labels every stimulus with -1, which matches no synset
            def start_task(self, task, fitting_stimuli):
                assert task == BrainModel.Task.label
                assert fitting_stimuli == 'imagenet'  # shortcut

            def look_at(self, stimuli, number_of_trials=1):
                labels = -np.ones_like(stimuli['stimulus_id'].values)
                return BehavioralAssembly([labels], coords={
                    **{column: ('presentation', stimuli[column].values) for column in stimuli.columns},
                    **{'choice': ('choice', ['dummy'])}}, dims=['choice', 'presentation'])

        candidate = AlwaysWrong()
        scores = [benchmark(candidate) for benchmark in benchmarks]
        assert all(np.mean(score) == approx(0) for score in scores)
|
35
|
+
|
36
|
+
# downloads ImageNet C blur benchmarks (7.1G) and downsamples with a factor of 1000
@pytest.mark.travis_slow
class TestImagenetC_Category:
    def test_groundtruth(self):
        """Single-category smoke test: an always-wrong candidate scores 0."""
        benchmarks = [
            Imagenet_C_Blur(sampling_factor=1000),
        ]

        class AlwaysWrong(BrainModel):
            # labels every stimulus with -1, which matches no synset
            def start_task(self, task, fitting_stimuli):
                assert task == BrainModel.Task.label
                assert fitting_stimuli == 'imagenet'  # shortcut

            def look_at(self, stimuli, number_of_trials=1):
                labels = -np.ones_like(stimuli['stimulus_id'].values)
                return BehavioralAssembly([labels], coords={
                    **{column: ('presentation', stimuli[column].values) for column in stimuli.columns},
                    **{'choice': ('choice', ['dummy'])}}, dims=['choice', 'presentation'])

        candidate = AlwaysWrong()
        scores = [benchmark(candidate) for benchmark in benchmarks]
        assert all(np.mean(score) == approx(0) for score in scores)
|
@@ -0,0 +1,11 @@
|
|
1
|
+
from brainscore_vision import benchmark_registry
|
2
|
+
from .benchmark import _Islam2021Dimensionality
|
3
|
+
|
4
|
+
# register one dimensionality benchmark per (factor, region) combination,
# e.g. 'Islam2021-shape_v1_dimensionality'; keyword defaults in the lambda
# bind the loop variables at definition time (avoids the late-binding pitfall)
for _region in ("V1", "V2", "V4", "IT"):
    for _factor in ("shape", "texture"):
        benchmark_registry[f'Islam2021-{_factor}_{_region.lower()}_dimensionality'] = \
            (lambda region=_region, factor=_factor: _Islam2021Dimensionality(region, factor))
|
@@ -0,0 +1,107 @@
|
|
1
|
+
import random
|
2
|
+
|
3
|
+
import numpy as np
|
4
|
+
from tqdm import tqdm
|
5
|
+
|
6
|
+
from brainscore_core import Score
|
7
|
+
from brainscore_core.benchmarks import BenchmarkBase
|
8
|
+
from brainscore_vision import BrainModel, load_stimulus_set, load_metric
|
9
|
+
from brainscore_vision.metrics.dimensionality import Dimensionality
|
10
|
+
|
11
|
+
BIBTEX = """@inproceedings{
|
12
|
+
islam2021shape,
|
13
|
+
title={Shape or Texture: Understanding Discriminative Features in {\{}CNN{\}}s},
|
14
|
+
author={Md Amirul Islam and Matthew Kowal and Patrick Esser and Sen Jia and Bj{\"o}rn Ommer and Konstantinos G. Derpanis and Neil Bruce},
|
15
|
+
booktitle={International Conference on Learning Representations},
|
16
|
+
year={2021},
|
17
|
+
url={https://openreview.net/forum?id=NcFEZOi-rLa}
|
18
|
+
}"""
|
19
|
+
|
20
|
+
|
21
|
+
TIME_BIN_ST, TIME_BIN_END = 70, 170 # standard core object recognition response, following Majaj*, Hong*, et al. 2015
|
22
|
+
SEED = 1751 #turn this benchmark into a deterministic one
|
23
|
+
|
24
|
+
class _Islam2021Dimensionality(BenchmarkBase):
    """
    Dimensionality benchmark after Islam et al. 2021: for each stimulus, a partner
    stimulus sharing either its shape or its texture is sampled, and the metric
    estimates how many dimensions of the recorded region encode the shared factor.
    """

    def __init__(self, region, factor, deterministic=True):
        """
        :param region: one of 'V1', 'V2', 'V4', 'IT'
        :param factor: one of 'shape', 'texture', 'residual'
        :param deterministic: if True, pair sampling uses the fixed seed SEED
        """
        assert factor in ["shape", "texture", "residual"]
        assert region in ["V1", "V2", "V4", "IT"]
        self.stimulus_set = load_stimulus_set("Islam2021")
        self.region = region
        self.deterministic = deterministic
        # the factor name is passed through unchanged as the metric's factor index
        self._metric = load_metric('factor_dimensionality', factor=factor)
        self._number_of_trials = 1
        super(_Islam2021Dimensionality, self).__init__(
            identifier=f'Islam2021-{region}_{factor}_dimensionality', version=1,
            ceiling=Score(1),
            parent='Islam2021',
            bibtex=BIBTEX)

    def set_generator(self):
        # fresh generator on every call so repeated benchmark runs are reproducible
        if self.deterministic:
            self.generator = random.Random(SEED)
        else:
            self.generator = random.Random()

    def __call__(self, candidate: BrainModel):
        self.set_generator()
        candidate.start_recording(self.region, [(TIME_BIN_ST, TIME_BIN_END)])
        assembly = candidate.look_at(self.stimulus_set)
        factors, assembly1, assembly2 = self.get_assembly_sets(assembly)
        assembly1 = self.prepare_assembly(assembly1, factors)
        assembly2 = self.prepare_assembly(assembly2, factors)
        return self._metric(assembly1, assembly2)

    def get_assembly_sets(self, assembly, samples=None):
        """Build two aligned views of `assembly`: for every stimulus, a partner
        stimulus sharing either its shape or its texture (chosen at random)."""
        textures = sorted(set(self.stimulus_set["texture"].values))
        assert len(textures) == 5
        shapes = sorted(set(self.stimulus_set["shape"].values))
        assert len(shapes) == 20
        if samples is None:
            samples = len(self.stimulus_set)
        factors, first_indices, partner_indices = [], [], []
        for idx1 in tqdm(range(samples)):
            factor, idx2 = self.get_index_pair(idx1, textures, shapes)
            first_indices.append(idx1)
            partner_indices.append(idx2)
            factors.append(factor)
        return (np.array(factors),
                assembly[:, np.array(first_indices)],
                assembly[:, np.array(partner_indices)])

    def get_index_pair(self, idx1, textures, shapes):
        """For stimulus `idx1`, randomly pick the shared factor and return it with
        the index of a partner stimulus that shares it but differs in the other."""
        sample1 = self.stimulus_set.iloc[idx1]
        factor = self.generator.choice(["shape", "texture"])
        if factor == "shape":  # same shape, different texture
            other_textures = [texture for texture in textures if texture != sample1["texture"]]
            texture2 = self.generator.choice(other_textures)
            condition = \
                (self.stimulus_set["original_image_id"] == sample1["original_image_id"]) & \
                (self.stimulus_set["texture"] == texture2)
            idx2 = np.where(condition)[0].item()  # exactly one such stimulus expected
            sample2 = self.stimulus_set.iloc[idx2]
            assert sample2["shape"] == sample1["shape"] and sample1["texture"] != sample2["texture"]
        else:  # different shape, same texture
            other_shapes = [shape for shape in shapes if shape != sample1["shape"]]
            shape2 = self.generator.choice(other_shapes)
            condition = \
                (self.stimulus_set["texture"] == sample1["texture"]) & \
                (self.stimulus_set["shape"] == shape2)
            possible_indexes = np.where(condition)[0]
            idx2 = self.generator.choice(possible_indexes)
            sample2 = self.stimulus_set.iloc[idx2]
            assert sample2["shape"] != sample1["shape"] and sample1["texture"] == sample2["texture"]
        return factor, idx2

    def prepare_assembly(self, assembly, factors):
        """Attach factor labels as a coordinate and transpose so that the paired
        dimension comes first, as expected by the dimensionality metric."""
        assert assembly.shape[1] == len(factors)
        pair_dim = assembly.dims[1]
        assembly = assembly.assign_coords(factor=(pair_dim, factors))
        return assembly.T
|
107
|
+
|
@@ -0,0 +1,47 @@
|
|
1
|
+
from pathlib import Path
|
2
|
+
|
3
|
+
import boto3
|
4
|
+
import pytest
|
5
|
+
from pytest import approx
|
6
|
+
|
7
|
+
from brainio.assemblies import DataAssembly
|
8
|
+
from brainscore_vision import load_benchmark
|
9
|
+
from brainscore_vision.benchmark_helpers import PrecomputedFeatures
|
10
|
+
|
11
|
+
|
12
|
+
@pytest.fixture()
def alexnet_features():
    """Load precomputed AlexNet classifier.5 features on the Islam2021 stimuli
    (downloaded from S3 on first use) and wrap them as a model candidate."""
    # load
    filename = 'alexnet-islam2021-classifier.5.nc'
    precomputed_features_path = Path(__file__).parent / filename
    if not precomputed_features_path.is_file():  # download on demand
        s3 = boto3.client('s3')
        # fix: the remote key must interpolate the actual filename
        # (the f-string previously contained no placeholder)
        s3.download_file('brain-score-tests', f'tests/test_benchmarks/{filename}',
                         str(precomputed_features_path.absolute()))
    precomputed_features = DataAssembly.from_files(file_path=precomputed_features_path)

    # adjust metadata: turn file paths into bare stimulus ids (strip directory and extension)
    stimulus_id = list(map(lambda x: x.split("/")[-1][:-4], precomputed_features['stimulus_path'].values))
    precomputed_features['stimulus_path'] = stimulus_id
    precomputed_features = precomputed_features.rename({'stimulus_path': 'stimulus_id'})
    precomputed_features = precomputed_features.stack(presentation=('stimulus_id',))
    precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=None)
    return precomputed_features
|
30
|
+
|
31
|
+
|
32
|
+
@pytest.mark.private_access
class TestEngineering:
    """Regression values for AlexNet features on the Islam2021 dimensionality benchmarks."""

    @pytest.mark.parametrize('factor, area, expected_value', [
        ('shape', 'v1', approx(0.18310547, abs=0.001)),
        ('texture', 'v1', approx(0.30834961, abs=0.001)),
        ('shape', 'v2', approx(0.18310547, abs=0.001)),
        ('texture', 'v2', approx(0.30834961, abs=0.001)),
        ('shape', 'v4', approx(0.18310547, abs=0.001)),
        ('texture', 'v4', approx(0.30834961, abs=0.001)),
        ('shape', 'it', approx(0.18310547, abs=0.001)),
        ('texture', 'it', approx(0.30834961, abs=0.001)),
    ])
    def test_dimensionality(self, factor, area, expected_value, alexnet_features):
        # the features stem from a single AlexNet layer, hence identical values per area
        benchmark = load_benchmark(f'Islam2021-{factor}_{area}_dimensionality')
        result = benchmark(alexnet_features)
        assert result.item() == expected_value
|
@@ -0,0 +1,88 @@
|
|
1
|
+
import numpy as np
|
2
|
+
|
3
|
+
from brainscore_core import Score
|
4
|
+
from brainscore_vision import load_dataset, load_metric
|
5
|
+
from brainscore_vision.benchmark_helpers.screen import place_on_screen
|
6
|
+
from brainscore_vision.benchmarks import BenchmarkBase, ceil_score
|
7
|
+
from brainscore_vision.model_interface import BrainModel
|
8
|
+
|
9
|
+
BIBTEX = """@Article{Kar2019,
|
10
|
+
author={Kar, Kohitij
|
11
|
+
and Kubilius, Jonas
|
12
|
+
and Schmidt, Kailyn
|
13
|
+
and Issa, Elias B.
|
14
|
+
and DiCarlo, James J.},
|
15
|
+
title={Evidence that recurrent circuits are critical to the ventral stream's execution of core object recognition behavior},
|
16
|
+
journal={Nature Neuroscience},
|
17
|
+
year={2019},
|
18
|
+
month={Jun},
|
19
|
+
day={01},
|
20
|
+
volume={22},
|
21
|
+
number={6},
|
22
|
+
pages={974-983},
|
23
|
+
abstract={Non-recurrent deep convolutional neural networks (CNNs) are currently the best at modeling core object recognition, a behavior that is supported by the densely recurrent primate ventral stream, culminating in the inferior temporal (IT) cortex. If recurrence is critical to this behavior, then primates should outperform feedforward-only deep CNNs for images that require additional recurrent processing beyond the feedforward IT response. Here we first used behavioral methods to discover hundreds of these `challenge' images. Second, using large-scale electrophysiology, we observed that behaviorally sufficient object identity solutions emerged {\textasciitilde}30{\thinspace}ms later in the IT cortex for challenge images compared with primate performance-matched `control' images. Third, these behaviorally critical late-phase IT response patterns were poorly predicted by feedforward deep CNN activations. Notably, very-deep CNNs and shallower recurrent CNNs better predicted these late IT responses, suggesting that there is a functional equivalence between additional nonlinear transformations and recurrence. Beyond arguing that recurrent circuits are critical for rapid object identification, our results provide strong constraints for future recurrent model development.},
|
24
|
+
issn={1546-1726},
|
25
|
+
doi={10.1038/s41593-019-0392-5},
|
26
|
+
url={https://doi.org/10.1038/s41593-019-0392-5}
|
27
|
+
}"""
|
28
|
+
VISUAL_DEGREES = 8
|
29
|
+
NUMBER_OF_TRIALS = 44
|
30
|
+
TIME_BINS = [(time_bin_start, time_bin_start + 10) for time_bin_start in range(70, 250, 10)]
|
31
|
+
|
32
|
+
|
33
|
+
class DicarloKar2019OST(BenchmarkBase):
    """Object solution times (OST) benchmark on IT recordings from Kar et al. 2019."""

    def __init__(self):
        ceiling = Score(.79)  # computed offline by Kohitij Kar
        super(DicarloKar2019OST, self).__init__(identifier='Kar2019-ost', version=2,
                                                ceiling_func=lambda: ceiling,
                                                parent='IT',
                                                bibtex=BIBTEX)
        assembly = load_dataset('Kar2019')
        # drop duplicate images
        _, unique_indices = np.unique(assembly['stimulus_id'], return_index=True)
        assembly = assembly.isel(presentation=unique_indices)
        assembly.attrs['stimulus_set'] = assembly.stimulus_set.drop_duplicates('stimulus_id')

        assembly = assembly.sel(decoder='svm')

        self._assembly = assembly
        self._assembly['truth'] = self._assembly['image_label']
        self._assembly.stimulus_set['truth'] = self._assembly.stimulus_set['image_label']

        self._similarity_metric = load_metric('ost')

        self._visual_degrees = VISUAL_DEGREES
        self._number_of_trials = NUMBER_OF_TRIALS
        self._time_bins = TIME_BINS

    def __call__(self, candidate: BrainModel) -> Score:
        candidate.start_recording('IT', time_bins=self._time_bins)
        stimulus_set = place_on_screen(self._assembly.stimulus_set,
                                       target_visual_degrees=candidate.visual_degrees(),
                                       source_visual_degrees=self._visual_degrees)
        # Temporal recordings from large candidates take up a lot of memory and compute time.
        # To quickly reject recordings that are static over time, probe with a single image
        # and check whether the recordings vary over time at all. If they don't, the
        # candidate cannot predict temporal differences with the OST metric, so we can
        # score it with a failure state right away.
        probe_set = stimulus_set[:1]
        probe_set.identifier = None  # unset identifier to avoid storing (interferes with actual stimulus_set)
        probe_recordings = candidate.look_at(probe_set, number_of_trials=self._number_of_trials)
        if not temporally_varying(probe_recordings):
            score = Score(np.nan)
        else:
            recordings = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials)
            score = self._similarity_metric(recordings, self._assembly)
        score = ceil_score(score, self.ceiling)
        return score
|
77
|
+
|
78
|
+
|
79
|
+
def temporally_varying(recordings):
    """
    Tests whether the given recordings change over time, for any of the stimuli on any of the neuroids.

    :return: True if any of the neuroids changes over time for any of the stimuli, False otherwise
    """
    ordered = recordings.transpose('presentation', 'neuroid', 'time_bin')
    # compare every time bin against the first one
    baseline = ordered.sel(time_bin=ordered['time_bin'].values[0])
    return (ordered != baseline).any()
|
@@ -0,0 +1,93 @@
|
|
1
|
+
from pathlib import Path
|
2
|
+
|
3
|
+
import numpy as np
|
4
|
+
import pytest
|
5
|
+
from numpy.random.mtrand import RandomState
|
6
|
+
from pytest import approx
|
7
|
+
|
8
|
+
from brainio.assemblies import NeuroidAssembly, DataAssembly
|
9
|
+
from brainscore_vision import load_benchmark
|
10
|
+
from brainscore_vision.benchmark_helpers import PrecomputedFeatures
|
11
|
+
from brainscore_vision.benchmark_helpers.test_helper import VisualDegreesTests, NumberOfTrialsTests
|
12
|
+
from brainscore_vision.benchmarks.kar2019 import DicarloKar2019OST
|
13
|
+
from brainscore_vision.data_helpers import s3
|
14
|
+
|
15
|
+
visual_degrees = VisualDegreesTests()
|
16
|
+
number_trials = NumberOfTrialsTests()
|
17
|
+
|
18
|
+
|
19
|
+
@pytest.mark.memory_intense
@pytest.mark.private_access
@pytest.mark.slow
def test_Kar2019ost_cornet_s():
    """Regression test: precomputed CORnet-S features reproduce the known raw OST score."""
    benchmark = load_benchmark('Kar2019-ost')
    filename = 'cornet_s-kar2019.nc'
    filepath = Path(__file__).parent / filename
    # fix: the remote key must interpolate the actual filename
    # (the f-string previously contained no placeholder)
    s3.download_file_if_not_exists(local_path=filepath,
                                   bucket='brainscore-unittests',
                                   remote_filepath=f'tests/test_benchmarks/{filename}')
    precomputed_features = NeuroidAssembly.from_files(
        filepath,
        stimulus_set_identifier=benchmark._assembly.stimulus_set.identifier,
        stimulus_set=benchmark._assembly.stimulus_set)
    precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8)
    # score
    score = benchmark(precomputed_features).raw
    assert score == approx(.316, abs=.005)
|
36
|
+
|
37
|
+
|
38
|
+
@pytest.mark.private_access
@pytest.mark.parametrize('benchmark, candidate_degrees, image_id, expected', [
    pytest.param('Kar2019-ost', 14, '6d19b24c29832dfb28360e7731e3261c13a4287f',
                 approx(.225021, abs=.0001), marks=[pytest.mark.private_access]),
    pytest.param('Kar2019-ost', 6, '6d19b24c29832dfb28360e7731e3261c13a4287f',
                 approx(.001248, abs=.0001), marks=[pytest.mark.private_access]),
])
def test_amount_gray(benchmark: str, candidate_degrees: int, image_id: str, expected: float):
    """Check how much gray padding `place_on_screen` adds for the given candidate field of view."""
    # delegate to the shared visual-degrees test helper
    visual_degrees.amount_gray_test(benchmark, candidate_degrees, image_id, expected)
|
47
|
+
|
48
|
+
|
49
|
+
@pytest.mark.private_access
def test_repetitions():
    """The benchmark must pass repeated trials through to the candidate."""
    number_trials.repetitions_test('Kar2019-ost')
|
52
|
+
|
53
|
+
|
54
|
+
@pytest.mark.memory_intense
@pytest.mark.private_access
def test_no_time():
    """A candidate with a single time bin (no temporal variation) must score NaN."""
    benchmark = DicarloKar2019OST()
    random_state = RandomState(0)
    stimuli = benchmark._assembly.stimulus_set
    source = DataAssembly(random_state.rand(len(stimuli), 5, 1), coords={
        'stimulus_id': ('presentation', stimuli['stimulus_id']),
        'image_label': ('presentation', stimuli['image_label']),
        'truth': ('presentation', stimuli['truth']),
        'neuroid_id': ('neuroid', list(range(5))),
        'layer': ('neuroid', ['test'] * 5),
        'time_bin_start': ('time_bin', [70]),
        'time_bin_end': ('time_bin', [170]),
    }, dims=['presentation', 'neuroid', 'time_bin'])
    source.name = __name__ + ".test_notime"
    score = benchmark(PrecomputedFeatures(source, visual_degrees=8))
    assert np.isnan(score)  # not a temporal model
    assert np.isnan(score.raw)  # not a temporal model
    assert score.attrs['ceiling'] == approx(.79)
|
74
|
+
|
75
|
+
|
76
|
+
@pytest.mark.memory_intense
@pytest.mark.private_access
def test_random_time():
    """Random recordings across five time bins vary over time but predict nothing."""
    benchmark = DicarloKar2019OST()
    random_state = RandomState(0)
    stimuli = benchmark._assembly.stimulus_set
    source = DataAssembly(random_state.rand(len(stimuli), 5, 5), coords={
        'stimulus_id': ('presentation', stimuli['stimulus_id']),
        'image_label': ('presentation', stimuli['image_label']),
        'truth': ('presentation', stimuli['truth']),
        'neuroid_id': ('neuroid', list(range(5))),
        'layer': ('neuroid', ['test'] * 5),
        'time_bin_start': ('time_bin', [70, 90, 110, 130, 150]),
        'time_bin_end': ('time_bin', [90, 110, 130, 150, 170]),
    }, dims=['presentation', 'neuroid', 'time_bin'])
    source.name = __name__ + ".test_notime"
    score = benchmark(PrecomputedFeatures(source, visual_degrees=8))
    assert np.isnan(score)  # not a good temporal model
|