brainscore-vision 2.1__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,251 @@
|
|
1
|
+
import pandas as pd
|
2
|
+
import numpy as np
|
3
|
+
from brainio.assemblies import BehavioralAssembly
|
4
|
+
import sympy as sp
|
5
|
+
from pandas import DataFrame
|
6
|
+
from tqdm import tqdm
|
7
|
+
import statistics
|
8
|
+
from typing import Dict
|
9
|
+
|
10
|
+
# Distractor set sizes used in the experiment (stored as strings of floats,
# matching the coordinate values in the assemblies).
DISTRACTOR_NUMS = ["1.0", "5.0", "11.0"]

# Precomputed per-experiment subject-average lapse rates.
LAPSE_RATES = {
    'circle_line': 0.0335, 'color': 0.0578, 'convergence': 0.0372, 'eighth': 0.05556,
    'gray_easy': 0.0414, 'gray_hard': 0.02305, 'half': 0.0637, 'juncture': 0.3715,
    'lle': 0.0573, 'llh': 0.0402, 'quarter': 0.0534, 'round_f': 0.08196,
    'round_v': 0.0561, 'tilted_line': 0.04986,
}

# Precomputed integral errors, obtained by bootstrapping (see boostrap_integral below).
HUMAN_INTEGRAL_ERRORS = {
    'circle_line': 0.3078, 'color': 0.362, 'convergence': 0.2773, 'eighth': 0.278,
    'gray_easy': 0.309, 'gray_hard': 0.4246, 'half': 0.3661, 'juncture': 0.2198,
    'lle': 0.209, 'llh': 0.195, 'quarter': 0.2959, 'round_f': 0.344,
    'round_v': 0.2794, 'tilted_line': 0.3573,
}
|
24
|
+
|
25
|
+
|
26
|
+
def get_adjusted_rate(acc: float, lapse_rate: float, n_way: int = 2) -> float:
    """
    Adjust a raw accuracy for a (precomputed) subject lapse rate.

    Assumes that on a lapse trial the subject responds at chance (1 / n_way), so the
    expected chance contribution is subtracted and the remainder is rescaled by the
    non-lapsing fraction of trials.

    :param acc: float, the raw accuracy
    :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
    :param n_way: int, (default value 2), the number of response alternatives (chance = 1 / n_way)
    :return: float, the lapse-rate-corrected accuracy
    """
    # (docfix: the original docstring described the return as "the SEM of that array",
    # a copy-paste error from sem() below)
    return (acc - lapse_rate * (1.0 / n_way)) / (1 - lapse_rate)
|
37
|
+
|
38
|
+
|
39
|
+
def sem(array: BehavioralAssembly) -> float:
    """
    Compute the standard error of the mean (SEM) of an assembly's values.

    :param array: the assembly to look at
    :return: float, the SEM of that array
    """
    values = np.array(array)
    # population std (np.std default, ddof=0) divided by sqrt(n)
    return np.std(values) / np.sqrt(len(values))
|
48
|
+
|
49
|
+
|
50
|
+
def get_line(point_1: tuple, point_2: tuple) -> str:
    """
    Build the equation of the straight line through two points.

    :param point_1: tuple in the form (x, y) of first point
    :param point_2: tuple in the form (x, y) of second point
    :return: str, equation of the line in the form "m*x + c" (coefficients to 10 decimal places)
    """
    (x1, y1), (x2, y2) = point_1, point_2
    slope = (y2 - y1) / (x2 - x1)  # raises ZeroDivisionError for a vertical line
    intercept = y1 - slope * x1
    return f"{slope:.10f}*x + {intercept:.10f}"
|
64
|
+
|
65
|
+
|
66
|
+
def integrate_line(equation: str, lower: float, upper: float) -> float:
    """
    Definite integral of a line equation over [lower, upper].

    :param equation: a string representing the equation of the line to integrate
    :param lower: float, the lower bound of the definite integral
    :param upper: float, the upper bound of the definite integral
    :return: the definite integral of that line (a sympy number)
    """
    x = sp.symbols('x')
    # sympy parses the equation string (e.g. "0.5*x + 1.0") and integrates symbolically
    return sp.integrate(equation, (x, lower, upper))
|
78
|
+
|
79
|
+
|
80
|
+
def get_averages(df_blue: DataFrame, df_orange: DataFrame, num_distractors: str) -> (float, float):
    """
    Mean accuracy at one distractor size, for each of the two blocks.

    :param df_blue: the first (blue) block of data (target on a field of distractors)
    :param df_orange: the second (orange) block of data (distractor on a field of targets)
    :param num_distractors: string of a float representing how many distractors to look at
    :return: (blue mean of "correct", orange mean of "correct") for that distractor size
    """
    blue_subset = df_blue[df_blue["distractor_nums"] == num_distractors]
    orange_subset = df_orange[df_orange["distractor_nums"] == num_distractors]
    return blue_subset["correct"].values.mean(), orange_subset["correct"].values.mean()
|
94
|
+
|
95
|
+
|
96
|
+
def calculate_integral(df_blue: DataFrame, df_orange: DataFrame) -> float:
    """
    Manually calculates the area under the delta (orange - blue) line.

    The accuracy delta is computed at each distractor size (1, 5, 11); the two line
    segments joining those points are then integrated piecewise and summed.

    :param df_blue: the first (blue) block of data (target on a field of distractors)
    :param df_orange: the second (orange) block of data (distractor on a field of targets)
    :return: float representing the integral of the delta line, rounded to 4 places
    """
    deltas = {}
    for x_value, num_distractors in ((1, "1.0"), (5, "5.0"), (11, "11.0")):
        blue_avg, orange_avg = get_averages(df_blue, df_orange, num_distractors)
        deltas[x_value] = orange_avg - blue_avg

    # integrate the segment from x=1 to x=5, then x=5 to x=11
    segment_1 = integrate_line(get_line((1, deltas[1]), (5, deltas[5])), 1, 5)
    segment_2 = integrate_line(get_line((5, deltas[5]), (11, deltas[11])), 5, 11)
    return round(segment_1 + segment_2, 4)
|
127
|
+
|
128
|
+
|
129
|
+
def calculate_accuracy(df: BehavioralAssembly, lapse_rate: float) -> float:
    """
    Per-subject, lapse-rate-corrected accuracy for an assembly.

    Subject accuracy is averaged over all images with a certain distractor size and
    repetition coords (i.e. these coords are mixed together and the accuracy is
    calculated over this merged assembly).

    :param df: the experimental data to score
    :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
    :return: float representing the adjusted (for lapse rate) accuracy of that subject
    """
    num_correct = len(df[df["correct"] == True])
    raw_accuracy = num_correct / len(df)
    return get_adjusted_rate(raw_accuracy, lapse_rate)
|
142
|
+
|
143
|
+
|
144
|
+
def generate_summary_df(assembly: BehavioralAssembly, lapse_rate: float, block: str) -> pd.DataFrame:
    """
    Reduce a raw assembly to a dataframe of summary statistics used by the benchmark.

    For each subject and each distractor size, a lapse-rate-corrected accuracy is
    computed over the "normal" trials of the requested block.

    :param assembly: the data in the form of a BehavioralAssembly
    :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
    :param block: str that defines what data to look at, "first" (blue) or "second" (orange)
    :return: a DataFrame object that contains the needed summary data
    """
    block_data = assembly[(assembly["trial_type"] == "normal") & (assembly["block"] == block)]
    rows = []
    for participant in list(set(block_data['participant_id'].values)):
        participant_data = block_data[block_data["participant_id"] == participant]
        for distractor_num in DISTRACTOR_NUMS:
            distractor_data = participant_data[participant_data["distractor_nums"] == str(distractor_num)]
            if len(distractor_data) == 0:
                continue  # this subject saw no trials at this distractor size
            rows.append({
                'distractor_nums': distractor_num,
                'participant_id': participant,
                'correct': calculate_accuracy(distractor_data, lapse_rate),
            })
    return pd.DataFrame(rows, columns=['distractor_nums', 'participant_id', 'correct'])
|
173
|
+
|
174
|
+
|
175
|
+
def split_dataframe(df: BehavioralAssembly, seed: int) -> (BehavioralAssembly, BehavioralAssembly):
    """
    Randomly split an assembly into two halves along the presentation dimension.

    :param df: The DataFrame (assembly) to split
    :param seed: a seed for the numpy rng; pass None to leave the global rng state unseeded
    :return: Two DataFrames (assemblies), the two halves
    """
    if seed is not None:
        np.random.seed(seed)
    permuted = np.random.permutation(df.presentation.size)
    midpoint = len(permuted) // 2
    # first half gets the extra element when the count is odd... actually the
    # second half does, since midpoint rounds down
    first_half = df.isel(presentation=permuted[:midpoint])
    second_half = df.isel(presentation=permuted[midpoint:])
    return first_half, second_half
|
192
|
+
|
193
|
+
|
194
|
+
def get_acc_delta(df_blue: DataFrame, df_orange: DataFrame, num_dist: str) -> float:
    """
    Helper function for bootstrapping: draw one row per block (with replacement) at the
    given distractor size and return the blue - orange accuracy delta of that draw.

    :param df_blue: DataFrame, the first (blue) block of data (target on a field of distractors)
    :param df_orange: DataFrame, the second (orange) block of data (distractor on a field of targets)
    :param num_dist: string, number of distractors
    :return: float representing the requested accuracy delta.
    """
    blue_pool = df_blue[df_blue["distractor_nums"] == num_dist]
    orange_pool = df_orange[df_orange["distractor_nums"] == num_dist]
    # one bootstrap draw from each block
    blue_draw = blue_pool.sample(n=1, replace=True)["correct"].values[0]
    orange_draw = orange_pool.sample(n=1, replace=True)["correct"].values[0]
    return blue_draw - orange_draw
|
209
|
+
|
210
|
+
|
211
|
+
def boostrap_integral(df_blue: DataFrame, df_orange: DataFrame, num_loops: int = 500) -> Dict:
    """
    Computes an error (std) on the integral calculation by bootstrapping the integral
    over resampled subject draws.

    Sign convention: get_acc_delta returns blue - orange, whereas the benchmark's delta
    line (calculate_integral) uses orange - blue, so the mean integral is negated at
    the end to match.

    :param df_blue: DataFrame, the first (blue) block of data (target on a field of distractors)
    :param df_orange: DataFrame, the second (orange) block of data (distractor on a field of targets)
    :param num_loops: int, number of times the bootstrap will run (and thus take the average)
    :return: Dict with keys "bootstrap_integral_mean" and "integral_std"
    """
    num_subjects = len(set(df_blue["participant_id"]))
    integral_list = []
    for _ in tqdm(range(num_loops)):
        accuracy_delta_lows = []
        accuracy_delta_mids = []
        accuracy_delta_highs = []
        for _ in range(num_subjects):
            accuracy_delta_lows.append(get_acc_delta(df_blue, df_orange, num_dist="1.0"))  # low distractor case
            accuracy_delta_mids.append(get_acc_delta(df_blue, df_orange, num_dist="5.0"))  # mid distractor case
            accuracy_delta_highs.append(get_acc_delta(df_blue, df_orange, num_dist="11.0"))  # high distractor case
        # bugfix: the low/high averages were previously swapped (low <- highs,
        # high <- lows), which mis-weighted the two line segments (widths 4 vs 6).
        # NOTE(review): HUMAN_INTEGRAL_ERRORS above were precomputed with the old
        # code — re-derive them if exact reproduction is needed; TODO confirm.
        average_low_delta = statistics.mean(accuracy_delta_lows)
        average_mid_delta = statistics.mean(accuracy_delta_mids)
        average_high_delta = statistics.mean(accuracy_delta_highs)

        # get equation for line through points 1 - 5 and integrate:
        point_1 = (1, average_low_delta)
        point_2 = (5, average_mid_delta)
        first_half = integrate_line(get_line(point_1, point_2), 1, 5)

        # get line 5-11 equation and integrate
        point_3 = (11, average_high_delta)
        second_half = integrate_line(get_line(point_2, point_3), 5, 11)

        integral_list.append(first_half + second_half)

    integrals = np.array(integral_list, dtype=float)
    integral_mean = -np.mean(integrals)  # negated: see sign convention note above
    integral_std = np.std(integrals)
    return {"bootstrap_integral_mean": integral_mean, "integral_std": integral_std}
|
@@ -0,0 +1,114 @@
|
|
1
|
+
from pathlib import Path
|
2
|
+
import pytest
|
3
|
+
from pytest import approx
|
4
|
+
from brainio.assemblies import BehavioralAssembly
|
5
|
+
from brainscore_vision import load_benchmark
|
6
|
+
from brainscore_vision.benchmark_helpers import PrecomputedFeatures
|
7
|
+
from brainscore_vision.data_helpers import s3
|
8
|
+
|
9
|
+
|
10
|
+
class TestExist:
    """Smoke tests: every Ferguson2024 benchmark is registered and loadable."""

    @pytest.mark.parametrize("identifier", [
        'Ferguson2024circle_line-value_delta',
        'Ferguson2024color-value_delta',
        'Ferguson2024convergence-value_delta',
        'Ferguson2024eighth-value_delta',
        'Ferguson2024gray_easy-value_delta',
        'Ferguson2024gray_hard-value_delta',
        'Ferguson2024half-value_delta',
        'Ferguson2024juncture-value_delta',
        'Ferguson2024lle-value_delta',
        'Ferguson2024llh-value_delta',
        'Ferguson2024quarter-value_delta',
        'Ferguson2024round_f-value_delta',
        'Ferguson2024round_v-value_delta',
        'Ferguson2024tilted_line-value_delta',
    ])
    def test_benchmark_registry(self, identifier):
        loaded = load_benchmark(identifier)
        assert loaded is not None
        assert loaded.identifier == identifier
|
31
|
+
|
32
|
+
|
33
|
+
class TestBehavioral:
    """Ceiling and precomputed-AlexNet score regression tests for Ferguson2024."""

    @staticmethod
    def _score_precomputed(benchmark_identifier: str):
        """Load the benchmark, fetch its precomputed AlexNet features from s3, and score them.

        Extracted because test_model_raw_score and test_model_ceiled_score previously
        duplicated this entire pipeline.
        """
        benchmark_object = load_benchmark(benchmark_identifier)
        filename = f"alexnet_{benchmark_identifier}.nc"
        precomputed_features = Path(__file__).parent / filename
        # NOTE(review): the remote_filepath f-string below has no placeholder and looks
        # garbled in this copy ("(unknown)") — presumably f'benchmarks/Ferguson2024/{filename}';
        # confirm against upstream before relying on it.
        s3.download_file_if_not_exists(precomputed_features,
                                       bucket='brainscore-vision', remote_filepath=f'benchmarks/Ferguson2024/(unknown)')
        precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features)
        precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8)
        return benchmark_object(precomputed_features)

    @pytest.mark.private_access
    @pytest.mark.parametrize('benchmark, expected_ceiling', [
        ('Ferguson2024circle_line-value_delta', approx(0.883, abs=0.001)),
        ('Ferguson2024color-value_delta', approx(0.897, abs=0.001)),
        ('Ferguson2024convergence-value_delta', approx(0.862, abs=0.001)),
        ('Ferguson2024eighth-value_delta', approx(0.852, abs=0.001)),
        ('Ferguson2024gray_easy-value_delta', approx(0.907, abs=0.001)),
        ('Ferguson2024gray_hard-value_delta', approx(0.863, abs=0.001)),
        ('Ferguson2024half-value_delta', approx(0.898, abs=0.001)),
        ('Ferguson2024juncture-value_delta', approx(0.767, abs=0.001)),
        ('Ferguson2024lle-value_delta', approx(0.831, abs=0.001)),
        ('Ferguson2024llh-value_delta', approx(0.812, abs=0.001)),
        ('Ferguson2024quarter-value_delta', approx(0.876, abs=0.001)),
        ('Ferguson2024round_f-value_delta', approx(0.874, abs=0.001)),
        ('Ferguson2024round_v-value_delta', approx(0.853, abs=0.001)),
        ('Ferguson2024tilted_line-value_delta', approx(0.912, abs=0.001)),
    ])
    def test_benchmark_ceiling(self, benchmark, expected_ceiling):
        benchmark = load_benchmark(benchmark)
        ceiling = benchmark._ceiling
        assert ceiling == expected_ceiling

    @pytest.mark.private_access
    @pytest.mark.parametrize('benchmark, expected_raw_score', [
        ('Ferguson2024circle_line-value_delta', approx(0.143, abs=0.001)),
        ('Ferguson2024color-value_delta', approx(0.645, abs=0.001)),
        ('Ferguson2024convergence-value_delta', approx(0.024, abs=0.001)),
        ('Ferguson2024eighth-value_delta', approx(0.093, abs=0.001)),
        ('Ferguson2024gray_easy-value_delta', approx(0.799, abs=0.001)),
        ('Ferguson2024gray_hard-value_delta', approx(0.609, abs=0.001)),
        ('Ferguson2024half-value_delta', approx(0.379, abs=0.001)),
        ('Ferguson2024juncture-value_delta', approx(0.191, abs=0.001)),
        ('Ferguson2024lle-value_delta', approx(0.208, abs=0.001)),
        ('Ferguson2024llh-value_delta', approx(0.654, abs=0.001)),
        ('Ferguson2024quarter-value_delta', approx(0.223, abs=0.001)),
        ('Ferguson2024round_f-value_delta', approx(0.455, abs=0.001)),
        ('Ferguson2024round_v-value_delta', approx(0.212, abs=0.001)),
        ('Ferguson2024tilted_line-value_delta', approx(0.445, abs=0.001)),
    ])
    def test_model_raw_score(self, benchmark, expected_raw_score):
        score = self._score_precomputed(benchmark)
        raw_score = score.raw
        # division by ceiling <= 1 should result in higher score
        assert score >= raw_score
        assert raw_score == expected_raw_score

    @pytest.mark.private_access
    @pytest.mark.parametrize('benchmark, expected_ceiled_score', [
        ('Ferguson2024circle_line-value_delta', approx(0.162, abs=0.001)),
        ('Ferguson2024color-value_delta', approx(0.719, abs=0.001)),
        ('Ferguson2024convergence-value_delta', approx(0.028, abs=0.001)),
        ('Ferguson2024eighth-value_delta', approx(0.109, abs=0.001)),
        ('Ferguson2024gray_easy-value_delta', approx(0.882, abs=0.001)),
        ('Ferguson2024gray_hard-value_delta', approx(0.706, abs=0.001)),
        ('Ferguson2024half-value_delta', approx(0.423, abs=0.001)),
        ('Ferguson2024juncture-value_delta', approx(0.248, abs=0.001)),
        ('Ferguson2024lle-value_delta', approx(0.250, abs=0.001)),
        ('Ferguson2024llh-value_delta', approx(0.805, abs=0.001)),
        ('Ferguson2024quarter-value_delta', approx(0.255, abs=0.001)),
        ('Ferguson2024round_f-value_delta', approx(0.520, abs=0.001)),
        ('Ferguson2024round_v-value_delta', approx(0.249, abs=0.001)),
        ('Ferguson2024tilted_line-value_delta', approx(0.489, abs=0.001)),
    ])
    def test_model_ceiled_score(self, benchmark, expected_ceiled_score):
        score = self._score_precomputed(benchmark)
        assert score == expected_ceiled_score
|
114
|
+
|
@@ -0,0 +1,10 @@
|
|
1
|
+
from brainscore_vision import benchmark_registry
from .benchmarks.benchmark import MovshonFreemanZiemba2013V1PLS, MovshonFreemanZiemba2013V2PLS
from .benchmarks.public_benchmarks import FreemanZiembaV1PublicBenchmark, FreemanZiembaV2PublicBenchmark

# private-data benchmarks
benchmark_registry['FreemanZiemba2013.V1-pls'] = MovshonFreemanZiemba2013V1PLS
benchmark_registry['FreemanZiemba2013.V2-pls'] = MovshonFreemanZiemba2013V2PLS

# public benchmarks
benchmark_registry['FreemanZiemba2013public.V1-pls'] = FreemanZiembaV1PublicBenchmark
benchmark_registry['FreemanZiemba2013public.V2-pls'] = FreemanZiembaV2PublicBenchmark
|
@@ -0,0 +1,53 @@
|
|
1
|
+
from result_caching import store
|
2
|
+
|
3
|
+
from brainscore_vision import load_dataset, load_metric, load_ceiling
|
4
|
+
from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition
|
5
|
+
from brainscore_vision.data.freemanziemba2013 import BIBTEX
|
6
|
+
from brainscore_vision.utils import LazyLoad
|
7
|
+
|
8
|
+
VISUAL_DEGREES = 4
|
9
|
+
NUMBER_OF_TRIALS = 20
|
10
|
+
|
11
|
+
|
12
|
+
def _MovshonFreemanZiemba2013Region(region, identifier_metric_suffix, similarity_metric, ceiler):
    """
    Build a NeuralBenchmark for one FreemanZiemba2013 region.

    The repetition-level assembly (used for the ceiling) and the repetition-averaged
    assembly (used for scoring) are both loaded lazily.
    """
    assembly_repetition = LazyLoad(lambda region=region: load_assembly(False, region=region))
    assembly = LazyLoad(lambda region=region: load_assembly(True, region=region))
    identifier = f'FreemanZiemba2013.{region}-{identifier_metric_suffix}'
    return NeuralBenchmark(identifier=identifier, version=2,
                           assembly=assembly, similarity_metric=similarity_metric, parent=region,
                           ceiling_func=lambda: ceiler(assembly_repetition),
                           visual_degrees=VISUAL_DEGREES, number_of_trials=NUMBER_OF_TRIALS,
                           bibtex=BIBTEX)
|
20
|
+
|
21
|
+
|
22
|
+
def MovshonFreemanZiemba2013V1PLS():
    """FreemanZiemba2013 V1 benchmark: PLS metric, internal-consistency ceiling."""
    similarity_metric = load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='texture_type'))
    consistency_ceiler = load_ceiling('internal_consistency')
    return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='pls',
                                           similarity_metric=similarity_metric, ceiler=consistency_ceiler)
|
27
|
+
|
28
|
+
|
29
|
+
def MovshonFreemanZiemba2013V2PLS():
    """FreemanZiemba2013 V2 benchmark: PLS metric, internal-consistency ceiling."""
    similarity_metric = load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='texture_type'))
    consistency_ceiler = load_ceiling('internal_consistency')
    return _MovshonFreemanZiemba2013Region('V2', identifier_metric_suffix='pls',
                                           similarity_metric=similarity_metric, ceiler=consistency_ceiler)
|
34
|
+
|
35
|
+
|
36
|
+
@store()
def load_assembly(average_repetitions, region, access='private'):
    """
    Load the FreemanZiemba2013 neural assembly for one recording region.

    :param average_repetitions: bool — if True, average responses over stimulus repetitions
    :param region: which recording region to select (e.g. 'V1' or 'V2')
    :param access: dataset split to load, 'private' (default) or 'public'
    :return: assembly transposed to (presentation, neuroid), time-averaged over 50-200 ms
    """
    assembly = load_dataset(f'FreemanZiemba2013.{access}')
    assembly = assembly.sel(region=region)
    assembly = assembly.stack(neuroid=['neuroid_id'])  # work around xarray multiindex issues
    assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
    assembly.load()
    # average responses over the 50-200 ms window (selected as 1 ms bins), then
    # re-attach a single (time_bin_start, time_bin_end) coordinate describing it
    time_window = (50, 200)
    assembly = assembly.sel(time_bin=[(t, t + 1) for t in range(*time_window)])
    assembly = assembly.mean(dim='time_bin', keep_attrs=True)
    assembly = assembly.expand_dims('time_bin_start').expand_dims('time_bin_end')
    assembly['time_bin_start'], assembly['time_bin_end'] = [time_window[0]], [time_window[1]]
    assembly = assembly.stack(time_bin=['time_bin_start', 'time_bin_end'])
    assembly = assembly.squeeze('time_bin')  # drop the now size-1 time dimension
    assembly = assembly.transpose('presentation', 'neuroid')
    if average_repetitions:
        assembly = average_repetition(assembly)
    return assembly
|
@@ -0,0 +1,37 @@
|
|
1
|
+
"""
|
2
|
+
The purpose of this file is to provide benchmarks based on publicly accessible data that can be run on candidate models
|
3
|
+
without restrictions. As opposed to the private benchmarks hosted on www.Brain-Score.org, models can be evaluated
|
4
|
+
without having to submit them to the online platform.
|
5
|
+
This allows for quick local prototyping, layer commitment, etc.
|
6
|
+
For the final model evaluation, candidate models should still be sent to www.Brain-Score.org to evaluate them on
|
7
|
+
held-out private data.
|
8
|
+
"""
|
9
|
+
import logging
|
10
|
+
|
11
|
+
from brainscore_vision import load_metric, load_ceiling
|
12
|
+
from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark
|
13
|
+
from brainscore_vision.utils import LazyLoad
|
14
|
+
from .benchmark import load_assembly, NUMBER_OF_TRIALS, VISUAL_DEGREES, BIBTEX
|
15
|
+
|
16
|
+
_logger = logging.getLogger(__name__)
|
17
|
+
|
18
|
+
|
19
|
+
def _freemanziemba2013_public_benchmark(region: str):
    """
    Build a public-data FreemanZiemba2013 benchmark for the given region
    (PLS metric, internal-consistency ceiling; assemblies loaded lazily).
    """
    assembly_repetition = LazyLoad(lambda: load_assembly(region=region, access='public', average_repetitions=False))
    assembly = LazyLoad(lambda: load_assembly(region=region, access='public', average_repetitions=True))
    pls_metric = load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='texture_type'))
    consistency_ceiler = load_ceiling('internal_consistency')
    return NeuralBenchmark(identifier=f"FreemanZiemba2013.{region}.public-pls", version=1,
                           assembly=assembly, similarity_metric=pls_metric,
                           visual_degrees=VISUAL_DEGREES, number_of_trials=NUMBER_OF_TRIALS,
                           ceiling_func=lambda: consistency_ceiler(assembly_repetition),
                           parent=None,
                           bibtex=BIBTEX)
|
30
|
+
|
31
|
+
|
32
|
+
def FreemanZiembaV1PublicBenchmark():
    """Public-data FreemanZiemba2013 benchmark for V1."""
    return _freemanziemba2013_public_benchmark('V1')
|
34
|
+
|
35
|
+
|
36
|
+
def FreemanZiembaV2PublicBenchmark():
    """Public-data FreemanZiemba2013 benchmark for V2."""
    return _freemanziemba2013_public_benchmark('V2')
|
@@ -0,0 +1,98 @@
|
|
1
|
+
from pathlib import Path
|
2
|
+
|
3
|
+
import pytest
|
4
|
+
from pytest import approx
|
5
|
+
|
6
|
+
from brainscore_vision import benchmark_registry, load_benchmark
|
7
|
+
from brainscore_vision.benchmark_helpers import PrecomputedFeatures
|
8
|
+
from brainscore_vision.benchmark_helpers.test_helper import StandardizedTests, PrecomputedTests, NumberOfTrialsTests, \
|
9
|
+
VisualDegreesTests
|
10
|
+
from brainscore_vision.data_helpers import s3
|
11
|
+
|
12
|
+
# shared test-helper instances, reused across the tests below
standardized_tests = StandardizedTests()
precomputed_test = PrecomputedTests()
num_trials_test = NumberOfTrialsTests()
visual_degrees_test = VisualDegreesTests()
|
16
|
+
|
17
|
+
|
18
|
+
@pytest.mark.parametrize('benchmark', [
    'FreemanZiemba2013.V1-pls',
    'FreemanZiemba2013.V2-pls',
])
def test_benchmark_registry(benchmark):
    """Both private FreemanZiemba2013 benchmarks are registered."""
    assert benchmark in benchmark_registry
|
24
|
+
|
25
|
+
|
26
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize('benchmark, expected', [
    ('FreemanZiemba2013.V1-pls', approx(.873345, abs=.001)),
    ('FreemanZiemba2013.V2-pls', approx(.824836, abs=.001)),
])
def test_ceilings(benchmark, expected):
    """Benchmark ceilings match the precomputed reference values."""
    standardized_tests.ceilings_test(benchmark, expected)
|
33
|
+
|
34
|
+
|
35
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize('benchmark, visual_degrees, expected', [
    ('FreemanZiemba2013.V1-pls', 4, approx(.668491, abs=.001)),
    ('FreemanZiemba2013.V2-pls', 4, approx(.553155, abs=.001)),
])
def test_self_regression(benchmark, visual_degrees, expected):
    """Regressing a benchmark's assembly onto itself reproduces the reference score."""
    standardized_tests.self_regression_test(benchmark, visual_degrees, expected)
|
42
|
+
|
43
|
+
|
44
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize('benchmark, expected', [
    ('FreemanZiemba2013.V1-pls', approx(.466222, abs=.005)),
    ('FreemanZiemba2013.V2-pls', approx(.459283, abs=.005)),
])
def test_FreemanZiemba2013(benchmark, expected):
    """Scoring precomputed AlexNet features reproduces the reference score."""
    filename = 'alexnet-freemanziemba2013.aperture-private.nc'
    filepath = Path(__file__).parent / filename
    # NOTE(review): the remote_filepath f-string below has no placeholder and looks
    # garbled in this copy ("(unknown)") — presumably f'tests/test_benchmarks/{filename}';
    # confirm against upstream.
    s3.download_file_if_not_exists(local_path=filepath,
                                   bucket='brain-score-tests', remote_filepath=f'tests/test_benchmarks/(unknown)')
    precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected)
|
55
|
+
|
56
|
+
|
57
|
+
@pytest.mark.parametrize('benchmark, candidate_degrees, image_id, expected', [
    pytest.param('FreemanZiemba2013.V1-pls', 14, 'c3a633a13e736394f213ddf44bf124fe80cabe07',
                 approx(.31429, abs=.0001), marks=[pytest.mark.private_access]),
    pytest.param('FreemanZiemba2013.V1-pls', 6, 'c3a633a13e736394f213ddf44bf124fe80cabe07',
                 approx(.22966, abs=.0001), marks=[pytest.mark.private_access]),
    pytest.param('FreemanZiemba2013public.V1-pls', 14, '21041db1f26c142812a66277c2957fb3e2070916',
                 approx(.314561, abs=.0001), marks=[]),
    pytest.param('FreemanZiemba2013public.V1-pls', 6, '21041db1f26c142812a66277c2957fb3e2070916',
                 approx(.23113, abs=.0001), marks=[]),
    pytest.param('FreemanZiemba2013.V2-pls', 14, 'c3a633a13e736394f213ddf44bf124fe80cabe07',
                 approx(.31429, abs=.0001), marks=[pytest.mark.private_access]),
    pytest.param('FreemanZiemba2013.V2-pls', 6, 'c3a633a13e736394f213ddf44bf124fe80cabe07',
                 approx(.22966, abs=.0001), marks=[pytest.mark.private_access]),
    pytest.param('FreemanZiemba2013public.V2-pls', 14, '21041db1f26c142812a66277c2957fb3e2070916',
                 approx(.314561, abs=.0001), marks=[]),
    pytest.param('FreemanZiemba2013public.V2-pls', 6, '21041db1f26c142812a66277c2957fb3e2070916',
                 approx(.23113, abs=.0001), marks=[]),
])
def test_amount_gray(benchmark: str, candidate_degrees: int, image_id: str, expected: float):
    """The fraction of gray padding after rescaling to candidate_degrees matches the reference."""
    visual_degrees_test.amount_gray_test(benchmark, candidate_degrees, image_id, expected)
|
77
|
+
|
78
|
+
|
79
|
+
@pytest.mark.private_access
@pytest.mark.parametrize('benchmark_identifier', [
    'FreemanZiemba2013.V1-pls',
    'FreemanZiemba2013.V2-pls',
])
def test_repetitions(benchmark_identifier):
    """Benchmarks handle repeated stimulus presentations (number_of_trials)."""
    num_trials_test.repetitions_test(benchmark_identifier)
|
86
|
+
|
87
|
+
|
88
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize('benchmark, visual_degrees, expected', [
    ('FreemanZiemba2013public.V1-pls', 4, approx(.679954, abs=.001)),
    ('FreemanZiemba2013public.V2-pls', 4, approx(.577498, abs=.001)),
])
def test_self(benchmark, visual_degrees, expected):
    """Scoring a public benchmark's own assembly against itself reproduces the raw reference score."""
    benchmark = load_benchmark(benchmark)
    assembly_copy = benchmark._assembly.copy()
    features = {benchmark._assembly.stimulus_set.identifier: assembly_copy}
    score = benchmark(PrecomputedFeatures(features, visual_degrees=visual_degrees)).raw
    assert score == expected
|
@@ -0,0 +1,59 @@
|
|
1
|
+
from brainscore_vision import benchmark_registry
|
2
|
+
from . import benchmark
|
3
|
+
|
4
|
+
# Datasets from Geirhos et al. 2021. Each dataset gets two benchmarks: a top-1
# accuracy benchmark and an error-consistency benchmark. Registry keys and the
# benchmark class names in `benchmark` are the dataset name with hyphens removed
# (e.g. 'cue-conflict' -> 'Geirhos2021cueconflict-top1' /
# benchmark.Geirhos2021cueconflictAccuracy).
DATASETS = ['colour', 'contrast', 'cue-conflict', 'edge',
            'eidolonI', 'eidolonII', 'eidolonIII',
            'false-colour', 'high-pass', 'low-pass', 'phase-scrambling', 'power-equalisation',
            'rotation', 'silhouette', 'sketch', 'stylized', 'uniform-noise']

# Drive registration from DATASETS instead of 34 hand-written assignments so the
# list and the registry cannot drift apart. Registration order (and any
# AttributeError for a missing benchmark class) is identical to spelling each
# assignment out by hand.
for _dataset in DATASETS:
    _name = _dataset.replace('-', '')  # 'false-colour' -> 'falsecolour', etc.
    benchmark_registry[f'Geirhos2021{_name}-top1'] = \
        getattr(benchmark, f'Geirhos2021{_name}Accuracy')
    benchmark_registry[f'Geirhos2021{_name}-error_consistency'] = \
        getattr(benchmark, f'Geirhos2021{_name}ErrorConsistency')
|
59
|
+
|