brainscore_vision-2.1-py3-none-any.whl
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,438 @@
# encoding: utf8
"""
PyTorch implementation of the HMAX model of human vision. For more information
about HMAX, check:

    http://maxlab.neuro.georgetown.edu/hmax.html

The S and C units of the HMAX model can almost be mapped directly onto
TorchVision's Conv2d and MaxPool2d layers, where channels are used to store the
filters for different orientations. However, HMAX also implements multiple
scales, which doesn't map nicely onto the existing TorchVision functionality.
Therefore, each scale has its own Conv2d layer, which are executed in parallel.

Here is a schematic overview of the network architecture:

    layers consisting of units with increasing scale
    S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1
     \ /  \ /  \ /  \ /  \ /  \ /  \ /  \ /
      C1   C1   C1   C1   C1   C1   C1   C1
       \    \    \    |    /    /    /    /
             ALL-TO-ALL CONNECTIVITY
       /    /    /    |    \    \    \    \
      S2   S2   S2   S2   S2   S2   S2   S2
      |    |    |    |    |    |    |    |
      C2   C2   C2   C2   C2   C2   C2   C2

Author: Marijn van Vliet <w.m.vanvliet@gmail.com>

References
----------
.. [1] Riesenhuber, Maximilian, and Tomaso Poggio. “Hierarchical Models of
       Object Recognition in Cortex.” Nature Neuroscience 2, no. 11 (1999):
       1019–25. https://doi.org/10.1038/14819.
.. [2] Serre, T, M Kouh, C Cadieu, U Knoblich, Gabriel Kreiman, and T Poggio.
       “A Theory of Object Recognition: Computations and Circuits in the
       Feedforward Path of the Ventral Stream in Primate Visual Cortex.”
       Artificial Intelligence, no. December (2005): 1–130.
       https://doi.org/10.1.1.207.9279.
.. [3] Serre, Thomas, Aude Oliva, and Tomaso Poggio. “A Feedforward
       Architecture Accounts for Rapid Categorization.” Proceedings of the
       National Academy of Sciences 104, no. 15 (April 10, 2007): 6424–29.
       https://doi.org/10.1073/pnas.0700622104.
.. [4] Serre, Thomas, and Maximilian Riesenhuber. “Realistic Modeling of
       Simple and Complex Cell Tuning in the HMAX Model, and Implications for
       Invariant Object Recognition in Cortex.” CBCL Memo, no. 239 (2004).
.. [5] Serre, Thomas, Lior Wolf, Stanley Bileschi, Maximilian Riesenhuber,
       and Tomaso Poggio. “Robust Object Recognition with Cortex-like
       Mechanisms.” IEEE Trans Pattern Anal Mach Intell 29, no. 3 (2007):
       411–26. https://doi.org/10.1109/TPAMI.2007.56.
"""
import numpy as np
import torch
from scipy.io import loadmat
from torch import nn


def gabor_filter(size, wavelength, orientation):
    """Create a single gabor filter.

    Parameters
    ----------
    size : int
        The size of the filter, measured in pixels. The filter is square, hence
        only a single number (either width or height) needs to be specified.
    wavelength : float
        The wavelength of the grating in the filter, relative to half the
        size of the filter. For example, a wavelength of 2 will generate a
        Gabor filter with a grating that contains exactly one wave. This
        determines the "tightness" of the filter.
    orientation : float
        The orientation of the grating in the filter, in degrees.

    Returns
    -------
    filt : ndarray, shape (size, size)
        The filter weights.
    """
    lambda_ = size * 2. / wavelength
    sigma = lambda_ * 0.8
    gamma = 0.3  # spatial aspect ratio: 0.23 < gamma < 0.92
    theta = np.deg2rad(orientation + 90)

    # Generate Gabor filter
    x, y = np.mgrid[:size, :size] - (size // 2)
    rotx = x * np.cos(theta) + y * np.sin(theta)
    roty = -x * np.sin(theta) + y * np.cos(theta)
    filt = np.exp(-(rotx ** 2 + gamma ** 2 * roty ** 2) / (2 * sigma ** 2))
    filt *= np.cos(2 * np.pi * rotx / lambda_)
    filt[np.sqrt(x ** 2 + y ** 2) > (size / 2)] = 0

    # Normalize the filter to zero mean and unit L2 norm
    filt = filt - np.mean(filt)
    filt = filt / np.sqrt(np.sum(filt ** 2))

    return filt


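# Illustrative sketch (not part of the original file; the values are made
# up): the normalization above gives every filter zero mean and unit L2 norm.
#
#     bank = np.stack([gabor_filter(size=11, wavelength=4, orientation=o)
#                      for o in (90, -45, 0, 45)])
#     assert bank.shape == (4, 11, 11)
#     assert np.allclose(bank.sum(axis=(1, 2)), 0)         # zero mean
#     assert np.allclose((bank ** 2).sum(axis=(1, 2)), 1)  # unit L2 norm

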
class S1(nn.Module):
    """A layer of S1 units with different orientations but the same scale.

    The S1 units are at the bottom of the network. They are exposed to the raw
    pixel data of the image. Each S1 unit is a Gabor filter, which detects
    edges in a certain orientation. They are implemented as PyTorch Conv2d
    modules, where each channel is loaded with a Gabor filter in a specific
    orientation.

    Parameters
    ----------
    size : int
        The size of the filters, measured in pixels. The filters are square,
        hence only a single number (either width or height) needs to be
        specified.
    wavelength : float
        The wavelength of the grating in the filter, relative to half the
        size of the filter. For example, a wavelength of 2 will generate a
        Gabor filter with a grating that contains exactly one wave. This
        determines the "tightness" of the filter.
    orientations : list of float
        The orientations of the Gabor filters, in degrees.
    """

    def __init__(self, size, wavelength, orientations=(90, -45, 0, 45)):
        super().__init__()
        self.num_orientations = len(orientations)
        self.size = size

        # Use PyTorch's Conv2d as a base object. Each "channel" will be an
        # orientation.
        self.gabor = nn.Conv2d(1, self.num_orientations, size,
                               padding=size // 2, bias=False)

        # Fill the Conv2d filter weights with Gabor kernels: one for each
        # orientation
        for channel, orientation in enumerate(orientations):
            self.gabor.weight.data[channel, 0] = torch.Tensor(
                gabor_filter(size, wavelength, orientation))

        # A convolution layer filled with ones, one output channel per
        # orientation. This is used to normalize the result in the forward
        # method.
        self.uniform = nn.Conv2d(1, self.num_orientations, size,
                                 padding=size // 2, bias=False)
        nn.init.constant_(self.uniform.weight, 1)

        # Since everything is pre-computed, no gradient is required
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, img):
        """Apply Gabor filters, take absolute value, and normalize."""
        s1_output = torch.abs(self.gabor(img))
        norm = torch.sqrt(self.uniform(img ** 2))
        norm.data[norm == 0] = 1  # To avoid divide by zero
        s1_output /= norm
        return s1_output


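# Illustrative sketch (not part of the original file; sizes are made up):
#
#     s1 = S1(size=7, wavelength=4)
#     img = torch.rand(2, 1, 128, 128)  # (batch, channel, height, width)
#     out = s1(img)
#     assert out.shape == (2, s1.num_orientations, 128, 128)

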
class C1(nn.Module):
    """A layer of C1 units with different orientations but the same scale.

    Each C1 unit pools over the S1 units that are assigned to it.

    Parameters
    ----------
    size : int
        Size of the MaxPool2d operation being performed by this C1 layer.
    """

    def __init__(self, size):
        super().__init__()
        self.size = size
        self.local_pool = nn.MaxPool2d(size, stride=size // 2,
                                       padding=size // 2)

    def forward(self, s1_outputs):
        """Max over scales, followed by a MaxPool2d operation."""
        s1_outputs = torch.cat([out.unsqueeze(0) for out in s1_outputs], 0)

        # Pool over all scales
        s1_output, _ = torch.max(s1_outputs, dim=0)

        # Pool over local (c1_space x c1_space) neighbourhood
        return self.local_pool(s1_output)


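# Illustrative sketch (not part of the original file): each C1 unit takes an
# element-wise max over a pair of S1 scales, then max-pools locally.
#
#     img = torch.rand(2, 1, 128, 128)
#     a = S1(size=7, wavelength=4)(img)     # both outputs are 128x128,
#     b = S1(size=9, wavelength=3.95)(img)  # so they can be stacked
#     pooled = C1(size=8)([a, b])

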
class S2(nn.Module):
    """A layer of S2 units with different orientations but the same scale.

    The activation of these units is computed by taking the distance between
    the output of the C layer below and a set of predefined patches. This
    distance is computed as:

        d = sqrt( (w - p)^2 )
          = sqrt( w^2 - 2pw + p^2 )

    Parameters
    ----------
    patches : ndarray, shape (n_patches, n_orientations, size, size)
        The precomputed patches to load into the weights of this layer.
    activation : 'gaussian' | 'euclidean'
        Which activation function to use for the units. In the PNAS paper, a
        gaussian curve is used ('gaussian', the default), whereas the MATLAB
        implementation of The Laboratory for Computational Cognitive
        Neuroscience uses the euclidean distance ('euclidean').
    sigma : float
        The sharpness of the tuning (sigma in eqn 1 of [1]_). Defaults to 1.

    References
    ----------
    .. [1] Serre, Thomas, Aude Oliva, and Tomaso Poggio. “A Feedforward
           Architecture Accounts for Rapid Categorization.” Proceedings of the
           National Academy of Sciences 104, no. 15 (April 10, 2007): 6424–29.
           https://doi.org/10.1073/pnas.0700622104.
    """

    def __init__(self, patches, activation='gaussian', sigma=1):
        super().__init__()
        self.activation = activation
        self.sigma = sigma

        num_patches, num_orientations, size, _ = patches.shape

        # Main convolution layer
        self.conv = nn.Conv2d(in_channels=num_orientations,
                              out_channels=num_orientations * num_patches,
                              kernel_size=size,
                              padding=size // 2,
                              groups=num_orientations,
                              bias=False)
        self.conv.weight.data = torch.Tensor(
            patches.transpose(1, 0, 2, 3).reshape(
                num_orientations * num_patches, 1, size, size))

        # A convolution layer filled with ones. This is used for the distance
        # computation
        self.uniform = nn.Conv2d(1, 1, size, padding=size // 2, bias=False)
        nn.init.constant_(self.uniform.weight, 1)

        # This is also used for the distance computation
        self.patches_sum_sq = nn.Parameter(
            torch.Tensor((patches ** 2).sum(axis=(1, 2, 3))))

        self.num_patches = num_patches
        self.num_orientations = num_orientations
        self.size = size

        # No gradient required for this layer
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, c1_outputs):
        """Compute the distance between each C1 output and the patches."""
        s2_outputs = []
        for c1_output in c1_outputs:
            conv_output = self.conv(c1_output)

            # Unstack the orientations
            conv_output_size = conv_output.shape[3]
            conv_output = conv_output.view(
                -1, self.num_orientations, self.num_patches, conv_output_size,
                conv_output_size)

            # Pool over orientations
            conv_output = conv_output.sum(dim=1)

            # Compute distance
            c1_sq = self.uniform(
                torch.sum(c1_output ** 2, dim=1, keepdim=True))
            dist = c1_sq - 2 * conv_output
            dist += self.patches_sum_sq[None, :, None, None]

            # Apply activation function
            if self.activation == 'gaussian':
                dist = torch.exp(- 1 / (2 * self.sigma ** 2) * dist)
            elif self.activation == 'euclidean':
                dist[dist < 0] = 0  # Negative values should never occur
                torch.sqrt_(dist)
                dist = -dist
            else:
                raise ValueError("activation parameter should be either "
                                 "'gaussian' or 'euclidean'.")

            s2_outputs.append(dist)
        return s2_outputs


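# Numerical check (not part of the original file) of the expansion used in
# forward(): ||w - p||^2 = w.w - 2*p.w + p.p. The grouped convolution
# supplies the p.w term, self.uniform supplies w.w, and patches_sum_sq
# supplies p.p.
#
#     w = np.random.rand(4, 9, 9)  # a window of C1 output (made-up sizes)
#     p = np.random.rand(4, 9, 9)  # one patch
#     direct = ((w - p) ** 2).sum()
#     expanded = (w ** 2).sum() - 2 * (p * w).sum() + (p ** 2).sum()
#     assert np.isclose(direct, expanded)

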
class C2(nn.Module):
    """A layer of C2 units operating on a layer of S2 units."""

    def forward(self, s2_outputs):
        """Take the maximum value of the underlying S2 units."""
        maxs = [s2.max(dim=3)[0] for s2 in s2_outputs]
        maxs = [m.max(dim=2)[0] for m in maxs]
        maxs = torch.cat([m[:, None, :] for m in maxs], 1)
        return maxs.max(dim=1)[0]


class Output(nn.Module):
    """Identity module, used to expose intermediate activations by name."""

    def forward(self, prev_out):
        return prev_out


class HMAX(nn.Module):
    """The full HMAX model.

    Use the `get_all_layers` method to obtain the activations for all layers.
    If you are only interested in the final output (=C2 layer), use the model
    as any other PyTorch module:

        model = HMAX(universal_patch_set)
        output = model(img)

    Parameters
    ----------
    universal_patch_set : str
        Filename of the .mat file containing the universal patch set.
    s2_act : 'gaussian' | 'euclidean'
        The activation function for the S2 units. Defaults to 'gaussian'.

    Returns
    -------
    c2_output : list of Tensors, shape (batch_size, num_patches)
        For each scale, the output of the C2 units.
    """

    def __init__(self, universal_patch_set, s2_act='gaussian'):
        super().__init__()

        # S1 layers, consisting of units with increasing size
        self.s1_units = [
            S1(size=7, wavelength=4),
            S1(size=9, wavelength=3.95),
            S1(size=11, wavelength=3.9),
            S1(size=13, wavelength=3.85),
            S1(size=15, wavelength=3.8),
            S1(size=17, wavelength=3.75),
            S1(size=19, wavelength=3.7),
            S1(size=21, wavelength=3.65),
            S1(size=23, wavelength=3.6),
            S1(size=25, wavelength=3.55),
            S1(size=27, wavelength=3.5),
            S1(size=29, wavelength=3.45),
            S1(size=31, wavelength=3.4),
            S1(size=33, wavelength=3.35),
            S1(size=35, wavelength=3.3),
            S1(size=37, wavelength=3.25),
        ]

        # Explicitly add the S1 units as submodules of the model
        for s1 in self.s1_units:
            self.add_module('s1_%02d' % s1.size, s1)

        # Each C1 layer pools across two S1 layers
        self.c1_units = [
            C1(size=8),
            C1(size=10),
            C1(size=12),
            C1(size=14),
            C1(size=16),
            C1(size=18),
            C1(size=20),
            C1(size=22),
        ]

        # Explicitly add the C1 units as submodules of the model
        for c1 in self.c1_units:
            self.add_module('c1_%02d' % c1.size, c1)

        # Read the universal patch set for the S2 layer
        m = loadmat(universal_patch_set)
        patches = [patch.reshape(shape[[2, 1, 0, 3]]).transpose(3, 0, 2, 1)
                   for patch, shape in zip(m['patches'][0], m['patchSizes'].T)]

        # One S2 layer for each patch scale, operating on all C1 layers
        self.s2_units = [S2(patches=scale_patches, activation=s2_act)
                         for scale_patches in patches]

        # Explicitly add the S2 units as submodules of the model
        for i, s2 in enumerate(self.s2_units):
            self.add_module('s2_%d' % i, s2)

        # One C2 layer operating on each scale
        self.c2_units = [C2() for s2 in self.s2_units]

        # Identity modules through which the intermediate outputs are passed,
        # so they are available under stable names (e.g. for forward hooks)
        for name in ['s1_out', 's2_out', 'c1_out', 'c2_out']:
            self.add_module(name, Output())

        # Explicitly add the C2 units as submodules of the model
        for i, c2 in enumerate(self.c2_units):
            self.add_module('c2_%d' % i, c2)

    def run_all_layers(self, img):
        """Compute the activation for each layer.

        Parameters
        ----------
        img : Tensor, shape (batch_size, 1, height, width)
            A batch of images to run through the model

        Returns
        -------
        s1_outputs : List of Tensors, shape (batch_size, num_orientations, height, width)
            For each scale, the output of the layer of S1 units.
        c1_outputs : List of Tensors, shape (batch_size, num_orientations, height, width)
            For each scale, the output of the layer of C1 units.
        s2_outputs : List of lists of Tensors, shape (batch_size, num_patches, height, width)
            For each C1 scale and each patch scale, the output of the layer of
            S2 units.
        c2_outputs : List of Tensors, shape (batch_size, num_patches)
            For each patch scale, the output of the layer of C2 units.
        """
        s1_outputs = [s1(img) for s1 in self.s1_units]
        self.s1_out(s1_outputs)

        # Each C1 layer pools across two S1 layers
        c1_outputs = []
        for c1, i in zip(self.c1_units, range(0, len(self.s1_units), 2)):
            c1_outputs.append(c1(s1_outputs[i:i + 2]))
        self.c1_out(c1_outputs)

        s2_outputs = [s2(c1_outputs) for s2 in self.s2_units]
        self.s2_out(s2_outputs)

        c2_outputs = [c2(s2) for c2, s2 in zip(self.c2_units, s2_outputs)]
        self.c2_out(c2_outputs)

        return s1_outputs, c1_outputs, s2_outputs, c2_outputs

    def forward(self, img):
        """Run through everything and concatenate the output of the C2s."""
        # Convert to grayscale by averaging the color channels (the reshape
        # assumes square images)
        img = img.mean(axis=1).reshape(img.shape[0], 1, img.shape[-1], img.shape[-1])
        c2_outputs = self.run_all_layers(img)[-1]
        c2_outputs = torch.cat(
            [c2_out[:, None, :] for c2_out in c2_outputs], 1)
        return c2_outputs

    def get_all_layers(self, img):
        """Get the activation for all layers as NumPy arrays.

        Parameters
        ----------
        img : Tensor, shape (batch_size, 1, height, width)
            A batch of images to run through the model

        Returns
        -------
        s1_outputs : List of arrays, shape (batch_size, num_orientations, height, width)
            For each scale, the output of the layer of S1 units.
        c1_outputs : List of arrays, shape (batch_size, num_orientations, height, width)
            For each scale, the output of the layer of C1 units.
        s2_outputs : List of lists of arrays, shape (batch_size, num_patches, height, width)
            For each C1 scale and each patch scale, the output of the layer of
            S2 units.
        c2_outputs : List of arrays, shape (batch_size, num_patches)
            For each patch scale, the output of the layer of C2 units.
        """
        s1_out, c1_out, s2_out, c2_out = self.run_all_layers(img)
        return (
            [s1.cpu().detach().numpy() for s1 in s1_out],
            [c1.cpu().detach().numpy() for c1 in c1_out],
            [[s2_.cpu().detach().numpy() for s2_ in s2] for s2 in s2_out],
            [c2.cpu().detach().numpy() for c2 in c2_out],
        )
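
# End-to-end usage sketch (not part of the original file). The filename is
# hypothetical; any .mat file with 'patches' and 'patchSizes' entries, as
# read in HMAX.__init__, would do.
#
#     model = HMAX('universal_patch_set.mat')
#     img = torch.rand(1, 3, 256, 256)  # forward() averages the color channels
#     c2 = model(img)                   # (batch, num_patch_scales, num_patches)
#     s1, c1, s2, c2_arrays = model.get_all_layers(
#         img.mean(dim=1, keepdim=True))  # expects grayscale input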
@@ -0,0 +1,216 @@
import os
import logging
from collections import OrderedDict

import numpy as np
import torch
from PIL import Image

from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper
from brainscore_vision.model_helpers.utils import fullname

SUBMODULE_SEPARATOR = '.'


class PytorchWrapper:
    def __init__(self, model, preprocessing, identifier=None, *args, **kwargs):
        logger = logging.getLogger(fullname(self))
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.debug(f"Using device {self._device}")
        self._model = model.to(self._device)
        identifier = identifier or model.__class__.__name__
        self._extractor = self._build_extractor(
            identifier=identifier, preprocessing=preprocessing, get_activations=self.get_activations, *args, **kwargs)
        self._extractor.insert_attrs(self)

    def _build_extractor(self, identifier, preprocessing, get_activations, *args, **kwargs):
        return ActivationsExtractorHelper(
            identifier=identifier, get_activations=get_activations, preprocessing=preprocessing,
            *args, **kwargs)

    @property
    def identifier(self):
        return self._extractor.identifier

    @identifier.setter
    def identifier(self, value):
        self._extractor.identifier = value

    def __call__(self, *args, **kwargs):
        # Temporarily disable result caching for the activations extraction
        previous_value = os.getenv('RESULTCACHING_DISABLE', '')
        os.environ['RESULTCACHING_DISABLE'] = 'model_tools.activations'
        result = self._extractor(*args, **kwargs)
        os.environ['RESULTCACHING_DISABLE'] = previous_value
        return result

    def get_activations(self, images, layer_names):
        images = torch.stack([torch.from_numpy(image) for image in images])
        images = images.to(self._device)
        self._model.eval()

        layer_results = OrderedDict()
        hooks = []

        for layer_name in layer_names:
            layer = self.get_layer(layer_name)
            hook = self.register_hook(layer, layer_name, target_dict=layer_results)
            hooks.append(hook)

        self._model(images)
        for hook in hooks:
            hook.remove()
        return layer_results

    def get_layer(self, layer_name):
        if layer_name == 'logits':
            return self._output_layer()
        module = self._model
        for part in layer_name.split(SUBMODULE_SEPARATOR):
            module = module._modules.get(part)
            assert module is not None, f"No submodule found for layer {layer_name}, at part {part}"
        return module

    def _output_layer(self):
        module = self._model
        while module._modules:
            module = module._modules[next(reversed(module._modules))]
        return module
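
    # Layer names are dotted module paths, resolved one submodule at a time
    # via SUBMODULE_SEPARATOR. Illustrative sketch (names are hypothetical):
    #
    #     wrapper.get_layer('layer4.1.conv2')  # -> model.layer4[1].conv2
    #     wrapper.get_layer('logits')          # -> the model's final submodule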

    @classmethod
    def set_weights(cls, output, shape):
        """Crop each tensor in `output` to `shape` and stack along a new dim 1."""
        if len(shape) == 5:
            return torch.cat([out[:shape[0], None, :shape[1], :shape[2], :shape[3], :shape[4]]
                              for out in output], 1)
        elif len(shape) == 4:
            return torch.cat([out[:shape[0], None, :shape[1], :shape[2], :shape[3]]
                              for out in output], 1)
        elif len(shape) == 3:
            return torch.cat([out[:shape[0], None, :shape[1], :shape[2]] for out in output], 1)
        elif len(shape) == 2:
            return torch.cat([out[:shape[0], None, :shape[1]] for out in output], 1)
        elif len(shape) == 1:
            return torch.cat([out[:shape[0], None] for out in output], 1)
        else:
            raise ValueError(f"Unexpected shape {shape}")

    @classmethod
    def _tensor_to_numpy(cls, output):
        # Layers may return a (possibly nested) list of tensors with unequal
        # spatial sizes. Crop everything to the smallest common shape, stack,
        # and flatten the stacking dimensions back down to 4 dims.
        if isinstance(output, list):
            zero = output[0]
            if isinstance(zero, list):  # a list of lists of tensors
                # Element-wise minimum shape across all inner tensors
                shape = np.full(len(zero[0].shape), 100000)
                for li in output:
                    for item in li:
                        for j in range(len(item.shape)):
                            if shape[j] > item.shape[j]:
                                shape[j] = item.shape[j]
                shape = shape.astype(int)
                new_weights = [cls.set_weights(outer, shape) for outer in output]
                new_weights = cls.set_weights(new_weights, new_weights[0].shape)
                if len(new_weights.shape) == 6:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0],
                        new_weights.shape[1] * new_weights.shape[2] * new_weights.shape[3],
                        new_weights.shape[4], new_weights.shape[5])
                elif len(new_weights.shape) == 5:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0],
                        new_weights.shape[1] * new_weights.shape[2],
                        new_weights.shape[3], new_weights.shape[4])
                elif len(new_weights.shape) == 4:
                    pass  # already 4-dimensional
                elif len(new_weights.shape) == 3:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0], 1, new_weights.shape[1], new_weights.shape[2])
                elif len(new_weights.shape) == 2:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0], 1, 1, new_weights.shape[1])
                else:
                    raise ValueError(f"Unexpected shape {new_weights.shape}")
                return new_weights
            else:  # a flat list of tensors
                shape = np.full(len(zero.shape), 10000)
                for item in output:
                    for j in range(len(item.shape)):
                        if shape[j] > item.shape[j]:
                            shape[j] = item.shape[j]
                shape = shape.astype(int)
                new_weights = cls.set_weights(output, shape)
                if len(new_weights.shape) > 4:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0],
                        new_weights.shape[1] * new_weights.shape[2],
                        new_weights.shape[3], new_weights.shape[4])
                if len(new_weights.shape) == 3:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0], 1, new_weights.shape[1], new_weights.shape[2])
                if len(new_weights.shape) == 2:
                    new_weights = new_weights.reshape(
                        new_weights.shape[0], 1, 1, new_weights.shape[1])
                return new_weights
        return output.cpu().data.numpy()

    def register_hook(self, layer, layer_name, target_dict):
        def hook_function(_layer, _input, output, name=layer_name):
            target_dict[name] = PytorchWrapper._tensor_to_numpy(output)

        hook = layer.register_forward_hook(hook_function)
        return hook

    def __repr__(self):
        return repr(self._model)

    def layers(self):
        for name, module in self._model.named_modules():
            if len(list(module.children())) > 0:  # this module only holds other modules
                continue
            yield name, module

    def graph(self):
        import networkx as nx
        g = nx.DiGraph()
        for layer_name, layer in self.layers():
            g.add_node(layer_name, object=layer, type=type(layer))
        return g


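# Behaviour sketch (not part of the original file) for _tensor_to_numpy with
# a flat list of tensors of unequal spatial size, as the HMAX layers return.
# Note that in the list branches the result is still a torch tensor, despite
# the method's name.
#
#     outs = [torch.rand(2, 4, 30, 30), torch.rand(2, 4, 28, 28)]
#     arr = PytorchWrapper._tensor_to_numpy(outs)
#     # crop each to (2, 4, 28, 28) -> stack to (2, 2, 4, 28, 28)
#     # -> reshape to (2, 8, 28, 28)

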
def load_preprocess_images(image_filepaths, image_size, **kwargs):
    images = load_images(image_filepaths)
    images = preprocess_images(images, image_size=image_size, **kwargs)
    return images


def load_images(image_filepaths):
    return [load_image(image_filepath) for image_filepath in image_filepaths]


def load_image(image_filepath):
    with Image.open(image_filepath) as pil_image:
        if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper():
            # not binary and not alpha; work around
            # https://github.com/python-pillow/Pillow/issues/1144,
            # see https://stackoverflow.com/a/30376272/2225200
            return pil_image.copy()
        else:  # make sure potential binary images are in RGB
            rgb_image = Image.new("RGB", pil_image.size)
            rgb_image.paste(pil_image)
            return rgb_image


def preprocess_images(images, image_size, **kwargs):
    preprocess = torchvision_preprocess_input(image_size, **kwargs)
    images = [preprocess(image) for image in images]
    images = np.concatenate(images)
    return images


def torchvision_preprocess_input(image_size, **kwargs):
    from torchvision import transforms
    return transforms.Compose([
        transforms.Resize((image_size, image_size)),
        torchvision_preprocess(**kwargs),
    ])


def torchvision_preprocess(normalize_mean=(0.485, 0.456, 0.406), normalize_std=(0.229, 0.224, 0.225)):
    from torchvision import transforms
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=normalize_mean, std=normalize_std),
        lambda img: img.unsqueeze(0)
    ])
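
# Usage sketch (not part of the original file; the identifier, layer names,
# and paths are hypothetical). Preprocessing is passed as a callable from
# image file paths to a stacked array, typically via functools.partial:
#
#     import functools
#     from torchvision.models import resnet18
#     preprocessing = functools.partial(load_preprocess_images, image_size=224)
#     wrapper = PytorchWrapper(identifier='my-resnet18', model=resnet18(),
#                              preprocessing=preprocessing)
#     stimuli_paths = ['image1.png', 'image2.png']
#     activations = wrapper(stimuli_paths, layers=['layer3', 'layer4'])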