brainscore_vision-2.1-py3-none-any.whl
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md
@@ -0,0 +1,105 @@
# VOneNet: CNNs with a Primary Visual Cortex Front-End

VOneNets are a family of biologically inspired Convolutional Neural Networks (CNNs) with the following features:
- Fixed-weight neural network model of the primate primary visual cortex (V1) as the front-end
- Robust to image perturbations
- Brain-mapped
- Flexible: can be adapted to different back-end architectures

[read more...](#longer-motivation)

## Available Models

*(Click on a model name to download the weights of the ImageNet-trained model. Alternatively, you can use the get_model function in the vonenet package to download the weights; see the sketch below the table.)*

| Name | Description |
| -------- | ------------------------------------------------------------------------ |
| [VOneResNet50](https://vonenet-models.s3.us-east-2.amazonaws.com/voneresnet50_e70.pth.tar) | Our best-performing VOneNet, with a ResNet50 back-end |
| [VOneCORnet-S](https://vonenet-models.s3.us-east-2.amazonaws.com/vonecornets_e70.pth.tar) | VOneNet with a recurrent neural network back-end based on CORnet-S |
| [VOneAlexNet](https://vonenet-models.s3.us-east-2.amazonaws.com/vonealexnet_e70.pth.tar) | VOneNet with a back-end based on AlexNet |

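As a minimal sketch (assuming the vonenet package is installed and the weights can be downloaded), loading a pretrained model mirrors the call used in run.py below:

```python
from vonenet import get_model

# Fetches the ImageNet-trained weights on first use; model_arch selects the
# back-end ('alexnet', 'resnet50', 'resnet50_at', or 'cornets').
model = get_model(model_arch='resnet50', pretrained=True)
model.eval()
```
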
## Quick Start

VOneNets were trained with images normalized with mean=[0.5, 0.5, 0.5] and std=[0.5, 0.5, 0.5].

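For example (a sketch using torchvision, matching the validation preprocessing in run.py below), inputs can be prepared as:

```python
import torchvision

# Resize, center-crop, and normalize with the mean/std the models were trained with.
preprocess = torchvision.transforms.Compose([
    torchvision.transforms.Resize(256),
    torchvision.transforms.CenterCrop(224),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                     std=[0.5, 0.5, 0.5]),
])
```
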
More information coming soon...

## Longer Motivation

Current state-of-the-art object recognition models are largely based on convolutional neural network (CNN) architectures, which are loosely inspired by the primate visual system. However, these CNNs can be fooled by imperceptibly small, explicitly crafted perturbations, and they struggle to recognize objects in corrupted images that humans recognize easily. Recently, we observed that CNN models with a neural hidden layer that better matches primate primary visual cortex (V1) are also more robust to adversarial attacks. Inspired by this observation, we developed VOneNets, a new class of hybrid CNN vision models. Each VOneNet contains a fixed-weight neural network front-end that simulates primate V1, called the VOneBlock, followed by a neural network back-end adapted from current CNN vision models. The VOneBlock is based on a classical neuroscientific model of V1, the linear-nonlinear-Poisson (LNP) model, consisting of a biologically constrained Gabor filter bank, simple- and complex-cell nonlinearities, and a V1 neuronal stochasticity generator. After training, VOneNets retain high ImageNet performance, but each is substantially more robust, outperforming the base CNNs and state-of-the-art methods by 18% and 3%, respectively, on a conglomerate benchmark of perturbations comprising white-box adversarial attacks and common image corruptions. Additionally, all components of the VOneBlock work in synergy to improve robustness.
Read more: [Dapello\*, Marques\*, et al. (bioRxiv, 2020)](https://doi.org/10.1101/2020.06.16.154542)

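For orientation, a schematic of the VOneBlock composition described above (an illustrative sketch only, not the actual implementation in vonenet/vonenet/modules.py, which differs in detail):

```python
import torch

def vone_block_sketch(x, gabor_bank, n_simple, noise_fn):
    """LNP-style composition: fixed linear filtering -> nonlinearities -> stochasticity."""
    responses = gabor_bank(x)                      # L: fixed-weight, biologically constrained Gabor filter bank
    simple = torch.relu(responses[:, :n_simple])   # N: simple-cell nonlinearity (rectification)
    complex_ = torch.abs(responses[:, n_simple:])  # N: complex-cell nonlinearity (phase-invariant magnitude)
    v1 = torch.cat([simple, complex_], dim=1)      # V1 activity: simple + complex channels
    return noise_fn(v1)                            # P: V1 neuronal stochasticity generator
```
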
## Requirements

- Python 3.6+
- PyTorch 0.4.1+
- numpy
- pandas
- tqdm
- scipy

## Citation

Dapello, J., Marques, T., Schrimpf, M., Geiger, F., Cox, D.D., DiCarlo, J.J. (2020) Simulating a Primary Visual Cortex at the Front of CNNs Improves Robustness to Image Perturbations. *bioRxiv.* doi.org/10.1101/2020.06.16.154542

## License

GNU GPL 3+

## FAQ

Soon...

## Setup and Run

1. Clone the repository:

       $ git clone https://github.com/dicarlolab/vonenet.git

2. Running the setup requires a `val` directory containing the ImageNet validation images. The instructions below are translated from this Korean blog post: https://seongkyun.github.io/others/2019/03/06/imagenet_dn/

   **Download link:** https://academictorrents.com/collection/imagenet-2012

   Once the large tar files have downloaded, unpack them:

       # Unpack the training dataset
       $ mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
       $ tar -xvf ILSVRC2012_img_train.tar
       $ rm -f ILSVRC2012_img_train.tar  # optional: remove the archive once extracted
       $ find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
       $ cd ..

       # Unpack the validation dataset
       $ mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
       $ wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash

   When this finishes you should have `train` and `val` directories; the `val` directory is required for the run step below.

   **Caution:** after unpacking, remove any directory or file whose name does not start with a synset ID (`n0...`), e.g. `ILSVRC2012_img_train` left inside `train/` or `ILSVRC2012_img_val.tar` left inside `val/`; leftover files will cause failures during training.

3. Once the data is ready, install the package and run it (first check that your Python, PyTorch, and CUDA toolkit versions meet the requirements):

       $ python3 setup.py install
       $ python3 run.py --in_path {directory containing the dataset; the 'val' directory must be inside it}

   If you see GPU-related problems, especially 'GPU is not available', even though a GPU is present, run on the CPU instead:

       $ python3 run.py --in_path {directory containing the dataset; the 'val' directory must be inside it} --ngpus 0

   `ngpus` defaults to 1; pass `--ngpus 0` if you are happy to run on the CPU.

brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py
@@ -0,0 +1,136 @@
import os, argparse, time, subprocess, io, shlex
import pandas as pd
import tqdm

parser = argparse.ArgumentParser(description='ImageNet Validation')

parser.add_argument('--in_path', required=True,
                    help='path to ImageNet folder that contains val folder')
parser.add_argument('--batch_size', default=128, type=int,
                    help='size of batch for validation')
parser.add_argument('--workers', default=20, type=int,
                    help='number of data loading workers')
parser.add_argument('--ngpus', default=1, type=int,
                    help='number of GPUs to use; 0 if you want to run on CPU')
parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
                    help='back-end model architecture to load')

FLAGS, FIRE_FLAGS = parser.parse_known_args()


def set_gpus(n=2):
    """
    Finds all GPUs on the system and restricts to n of them that have the most
    free memory.
    """
    if n > 0:
        gpus = subprocess.run(shlex.split(
            'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True,
            stdout=subprocess.PIPE).stdout
        gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
        gpus = gpus[gpus['memory.total [MiB]'] > 10000]  # only above 10 GB
        if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
            visible = [int(i)
                       for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
            gpus = gpus[gpus['index'].isin(visible)]
        gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # make sure GPUs are numbered the same way as in nvidia-smi
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            [str(i) for i in gpus['index'].iloc[:n]])
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'


set_gpus(FLAGS.ngpus)

import torch
import torch.nn as nn
import torchvision
from vonenet import get_model

device = torch.device("cuda" if FLAGS.ngpus > 0 else "cpu")


def val():
    model = get_model(model_arch=FLAGS.model_arch, pretrained=True)

    if FLAGS.ngpus == 0:
        print('Running on CPU')
    if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1:
        print('Running on multiple GPUs')
        model = model.to(device)
    elif FLAGS.ngpus > 0 and torch.cuda.device_count() == 1:
        print('Running on single GPU')
        model = model.to(device)
    else:
        print('No GPU detected!')
        model = model.module  # unwrap the DataParallel wrapper for CPU execution

    validator = ImageNetVal(model)
    record = validator()

    print(record['top1'])
    print(record['top5'])
    return


class ImageNetVal(object):

    def __init__(self, model):
        self.name = 'val'
        self.model = model
        self.data_loader = self.data()
        self.loss = nn.CrossEntropyLoss(reduction='sum')  # sum over the batch; normalized by dataset size below
        self.loss = self.loss.to(device)

    def data(self):
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(FLAGS.in_path, 'val'),
            torchvision.transforms.Compose([
                torchvision.transforms.Resize(256),
                torchvision.transforms.CenterCrop(224),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                                 std=[0.5, 0.5, 0.5]),
            ]))
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=FLAGS.batch_size,
                                                  shuffle=False,
                                                  num_workers=FLAGS.workers,
                                                  pin_memory=True)

        return data_loader

    def __call__(self):
        self.model.eval()
        start = time.time()
        record = {'loss': 0, 'top1': 0, 'top5': 0}
        with torch.no_grad():
            for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
                target = target.to(device)
                output = self.model(inp)

                record['loss'] += self.loss(output, target).item()
                p1, p5 = accuracy(output, target, topk=(1, 5))
                record['top1'] += p1
                record['top5'] += p5

        for key in record:
            record[key] /= len(self.data_loader.dataset.samples)
        record['dur'] = (time.time() - start) / len(self.data_loader)

        return record


def accuracy(output, target, topk=(1,)):
    with torch.no_grad():
        _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = [correct[:k].sum().item() for k in topk]
        return res


if __name__ == '__main__':
    val()
brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py
@@ -0,0 +1,41 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup

with open('README.md') as readme_file:
    readme = readme_file.read()

requirements = [
    "torch>=0.4.0",
    "torchvision",
    "numpy",
    "pandas",
    "scipy",
    "tqdm",
    "fire",
    "requests",
]

setup(
    name='vonenet',
    version='0.1.0',
    description="CNNs with a Primary Visual Cortex Front-End",
    long_description=readme,
    author="Tiago Marques, Joel Dapello",
    author_email='tmarques@mit.edu, dapello@mit.edu',
    url='https://github.com/dicarlolab/vonenet',
    packages=['vonenet'],
    include_package_data=True,
    install_requires=requirements,
    license="GNU GPL v3",
    zip_safe=False,
    keywords='VOneNet, Robustness, Primary Visual Cortex',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
    ],
)
brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py
@@ -0,0 +1,383 @@
import os, argparse, time, subprocess, io, shlex, pickle, pprint
import pandas as pd
import numpy as np
import tqdm
import fire

parser = argparse.ArgumentParser(description='ImageNet Training')
## General parameters
parser.add_argument('--in_path', required=True,
                    help='path to ImageNet folder that contains train and val folders')
parser.add_argument('-o', '--output_path', default=None,
                    help='path for storing outputs')
parser.add_argument('-restore_epoch', '--restore_epoch', default=0, type=int,
                    help='epoch number for restoring model training')
parser.add_argument('-restore_path', '--restore_path', default=None, type=str,
                    help='path of folder containing specific epoch file for restoring model training')

## Training parameters
parser.add_argument('--ngpus', default=0, type=int,
                    help='number of GPUs to use; 0 if you want to run on CPU')
parser.add_argument('-j', '--workers', default=20, type=int,
                    help='number of data loading workers')
parser.add_argument('--epochs', default=70, type=int,
                    help='number of total epochs to run')
parser.add_argument('--batch_size', default=256, type=int,
                    help='mini-batch size')
parser.add_argument('--optimizer', choices=['stepLR', 'plateauLR'], default='stepLR',
                    help='optimizer / learning-rate schedule')
parser.add_argument('--lr', '--learning_rate', default=.1, type=float,
                    help='initial learning rate')
parser.add_argument('--step_size', default=20, type=int,
                    help='after how many epochs learning rate should be decreased by step_factor')
parser.add_argument('--step_factor', default=0.1, type=float,
                    help='factor by which to decrease the learning rate')
parser.add_argument('--momentum', default=.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=1e-4, type=float,
                    help='weight decay')

## Model parameters
parser.add_argument('--torch_seed', default=0, type=int,
                    help='seed for weights initializations and torch RNG')
parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
                    help='back-end model architecture to load')
parser.add_argument('--normalization', choices=['vonenet', 'imagenet'], default='vonenet',
                    help='image normalization to apply to models')
parser.add_argument('--visual_degrees', default=8, type=float,
                    help='field-of-view of the model in visual degrees')

## VOneBlock parameters
# Gabor filter bank
parser.add_argument('--stride', default=4, type=int,
                    help='stride for the first convolution (Gabor Filter Bank)')
parser.add_argument('--ksize', default=25, type=int,
                    help='kernel size for the first convolution (Gabor Filter Bank)')
parser.add_argument('--simple_channels', default=256, type=int,
                    help='number of simple channels in V1 block')
parser.add_argument('--complex_channels', default=256, type=int,
                    help='number of complex channels in V1 block')
parser.add_argument('--gabor_seed', default=0, type=int,
                    help='seed for gabor initialization')
parser.add_argument('--sf_corr', default=0.75, type=float,
                    help='')
parser.add_argument('--sf_max', default=6, type=float,
                    help='')
parser.add_argument('--sf_min', default=0, type=float,
                    help='')
parser.add_argument('--rand_param', choices=[True, False], default=False, type=bool,
                    help='random gabor params')
parser.add_argument('--k_exc', default=25, type=float,
                    help='')

# Noise layer
parser.add_argument('--noise_mode', choices=['gaussian', 'neuronal', None],
                    default=None,
                    help='noise distribution')
parser.add_argument('--noise_scale', default=1, type=float,
                    help='noise scale factor')
parser.add_argument('--noise_level', default=1, type=float,
                    help='noise level')


FLAGS, FIRE_FLAGS = parser.parse_known_args()


def set_gpus(n=2):
    """
    Finds all GPUs on the system and restricts to n of them that have the most
    free memory.
    """
    if n > 0:
        gpus = subprocess.run(shlex.split(
            'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True,
            stdout=subprocess.PIPE).stdout
        gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
        gpus = gpus[gpus['memory.total [MiB]'] > 10000]  # only above 10 GB
        if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
            visible = [int(i)
                       for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
            gpus = gpus[gpus['index'].isin(visible)]
        gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # make sure GPUs are numbered the same way as in nvidia-smi
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            [str(i) for i in gpus['index'].iloc[:n]])
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'


if FLAGS.ngpus > 0:
    set_gpus(FLAGS.ngpus)

import torch
import torch.nn as nn
import torch.utils.model_zoo
import torchvision
from vonenet import get_model

torch.manual_seed(FLAGS.torch_seed)

torch.backends.cudnn.benchmark = True

if FLAGS.ngpus > 0:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
    device = 'cpu'

if FLAGS.normalization == 'vonenet':
    print('VOneNet normalization')
    norm_mean = [0.5, 0.5, 0.5]
    norm_std = [0.5, 0.5, 0.5]
elif FLAGS.normalization == 'imagenet':
    print('Imagenet standard normalization')
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]


def load_model():
    map_location = None if FLAGS.ngpus > 0 else 'cpu'
    print('Getting VOneNet')
    model = get_model(map_location=map_location, model_arch=FLAGS.model_arch, pretrained=False,
                      visual_degrees=FLAGS.visual_degrees, stride=FLAGS.stride, ksize=FLAGS.ksize,
                      sf_corr=FLAGS.sf_corr, sf_max=FLAGS.sf_max, sf_min=FLAGS.sf_min, rand_param=FLAGS.rand_param,
                      gabor_seed=FLAGS.gabor_seed, simple_channels=FLAGS.simple_channels,
                      complex_channels=FLAGS.complex_channels, noise_mode=FLAGS.noise_mode,
                      noise_scale=FLAGS.noise_scale, noise_level=FLAGS.noise_level, k_exc=FLAGS.k_exc)

    if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1:
        print('We have multiple GPUs detected')
        model = model.to(device)
    elif FLAGS.ngpus > 0 and torch.cuda.device_count() == 1:
        print('We run on GPU')
        model = model.to(device)
    else:
        print('No GPU detected!')
        model = model.module  # unwrap the DataParallel wrapper for CPU execution

    return model


def train(save_train_epochs=.2,  # how often to save output during training
          save_val_epochs=.5,  # how often to save output during validation
          save_model_epochs=1,  # how often to save model weights
          save_model_secs=720 * 10  # how often to save the model (in seconds)
          ):

    model = load_model()

    trainer = ImageNetTrain(model)
    validator = ImageNetVal(model)

    start_epoch = 0
    records = []

    if FLAGS.restore_epoch > 0:
        print('Restoring from previous...')
        ckpt_data = torch.load(os.path.join(FLAGS.restore_path, f'epoch_{FLAGS.restore_epoch:02d}.pth.tar'))
        start_epoch = ckpt_data['epoch']
        print('Loaded epoch: ' + str(start_epoch))
        model.load_state_dict(ckpt_data['state_dict'])
        trainer.optimizer.load_state_dict(ckpt_data['optimizer'])
        results_old = pickle.load(open(os.path.join(FLAGS.restore_path, 'results.pkl'), 'rb'))
        for result in results_old:
            records.append(result)

    results = {'meta': {'step_in_epoch': 0,
                        'epoch': start_epoch,
                        'wall_time': time.time()}
               }

    recent_time = time.time()

    nsteps = len(trainer.data_loader)

    # initialize to None so the `is not None` checks below are well-defined
    # even when a save interval is disabled
    save_train_steps = save_val_steps = save_model_steps = None
    if save_train_epochs is not None:
        save_train_steps = (np.arange(0, FLAGS.epochs + 1,
                                      save_train_epochs) * nsteps).astype(int)
    if save_val_epochs is not None:
        save_val_steps = (np.arange(0, FLAGS.epochs + 1,
                                    save_val_epochs) * nsteps).astype(int)
    if save_model_epochs is not None:
        save_model_steps = (np.arange(0, FLAGS.epochs + 1,
                                      save_model_epochs) * nsteps).astype(int)

    for epoch in tqdm.trange(start_epoch, FLAGS.epochs + 1, initial=0, desc='epoch'):
        print(epoch)
        data_load_start = np.nan

        data_loader_iter = trainer.data_loader

        for step, data in enumerate(tqdm.tqdm(data_loader_iter, desc=trainer.name)):
            data_load_time = time.time() - data_load_start
            global_step = epoch * nsteps + step

            if save_val_steps is not None:
                if global_step in save_val_steps:
                    results[validator.name] = validator()
                    if FLAGS.optimizer == 'plateauLR' and step == 0:
                        trainer.lr.step(results[validator.name]['loss'])
                    trainer.model.train()
                    print('LR: ', trainer.optimizer.param_groups[0]["lr"])

            if FLAGS.output_path is not None:
                if not (os.path.isdir(FLAGS.output_path)):
                    os.mkdir(FLAGS.output_path)

                records.append(results)
                if len(results) > 1:
                    pickle.dump(records, open(os.path.join(FLAGS.output_path, 'results.pkl'), 'wb'))

                ckpt_data = {}
                ckpt_data['flags'] = FLAGS.__dict__.copy()
                ckpt_data['epoch'] = epoch
                ckpt_data['state_dict'] = model.state_dict()
                ckpt_data['optimizer'] = trainer.optimizer.state_dict()

                if save_model_secs is not None:
                    if time.time() - recent_time > save_model_secs:
                        torch.save(ckpt_data, os.path.join(FLAGS.output_path,
                                                           'latest_checkpoint.pth.tar'))
                        recent_time = time.time()

                if save_model_steps is not None:
                    if global_step in save_model_steps:
                        torch.save(ckpt_data, os.path.join(FLAGS.output_path,
                                                           f'epoch_{epoch:02d}.pth.tar'))

            else:
                if len(results) > 1:
                    pprint.pprint(results)

            if epoch < FLAGS.epochs:
                frac_epoch = (global_step + 1) / nsteps
                record = trainer(frac_epoch, *data)
                record['data_load_dur'] = data_load_time
                results = {'meta': {'step_in_epoch': step + 1,
                                    'epoch': frac_epoch,
                                    'wall_time': time.time()}
                           }
                if save_train_steps is not None:
                    if step in save_train_steps:
                        results[trainer.name] = record

            data_load_start = time.time()


class ImageNetTrain(object):

    def __init__(self, model):
        self.name = 'train'
        self.model = model
        self.data_loader = self.data()
        self.optimizer = torch.optim.SGD(self.model.parameters(), FLAGS.lr, momentum=FLAGS.momentum,
                                         weight_decay=FLAGS.weight_decay)
        if FLAGS.optimizer == 'stepLR':
            self.lr = torch.optim.lr_scheduler.StepLR(self.optimizer, gamma=FLAGS.step_factor,
                                                      step_size=FLAGS.step_size)
        elif FLAGS.optimizer == 'plateauLR':
            self.lr = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=FLAGS.step_factor,
                                                                 patience=FLAGS.step_size - 1, threshold=0.01)
        self.loss = nn.CrossEntropyLoss()
        if FLAGS.ngpus > 0:
            self.loss = self.loss.cuda()

    def data(self):
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(FLAGS.in_path, 'train'),
            torchvision.transforms.Compose([
                torchvision.transforms.RandomResizedCrop(224),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=norm_mean, std=norm_std)
            ]))
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=FLAGS.batch_size,
                                                  shuffle=True,
                                                  num_workers=FLAGS.workers,
                                                  pin_memory=True)

        return data_loader

    def __call__(self, frac_epoch, inp, target):
        start = time.time()
        if FLAGS.optimizer == 'stepLR':
            self.lr.step(epoch=frac_epoch)
        target = target.to(device)

        output = self.model(inp)

        record = {}
        loss = self.loss(output, target)
        record['loss'] = loss.item()
        record['top1'], record['top5'] = accuracy(output, target, topk=(1, 5))
        record['top1'] /= len(output)
        record['top5'] /= len(output)
        record['learning_rate'] = self.optimizer.param_groups[0]["lr"]
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        record['dur'] = time.time() - start
        return record


class ImageNetVal(object):

    def __init__(self, model):
        self.name = 'val'
        self.model = model
        self.data_loader = self.data()
        self.loss = nn.CrossEntropyLoss(reduction='sum')  # sum over the batch; normalized by dataset size below
        self.loss = self.loss.to(device)

    def data(self):
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(FLAGS.in_path, 'val'),
            torchvision.transforms.Compose([
                torchvision.transforms.Resize(256),
                torchvision.transforms.CenterCrop(224),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=norm_mean, std=norm_std),
            ]))
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=FLAGS.batch_size,
                                                  shuffle=False,
                                                  num_workers=FLAGS.workers,
                                                  pin_memory=True)

        return data_loader

    def __call__(self):
        self.model.eval()
        start = time.time()
        record = {'loss': 0, 'top1': 0, 'top5': 0}
        with torch.no_grad():
            for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
                target = target.to(device)
                output = self.model(inp)

                record['loss'] += self.loss(output, target).item()
                p1, p5 = accuracy(output, target, topk=(1, 5))
                record['top1'] += p1
                record['top5'] += p5

        for key in record:
            record[key] /= len(self.data_loader.dataset.samples)
        record['dur'] = (time.time() - start) / len(self.data_loader)

        return record


def accuracy(output, target, topk=(1,)):
    with torch.no_grad():
        _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = [correct[:k].sum().item() for k in topk]
        return res


if __name__ == '__main__':
    fire.Fire(command=FIRE_FLAGS)
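
A usage sketch for the training script above (paths are placeholders): argparse consumes the flags it recognizes and python-fire receives the remainder, so the function to run is named after the flags:

    $ python3 train.py --in_path /path/to/imagenet --output_path ./checkpoints --ngpus 1 train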