brainscore_vision-2.1-py3-none-any.whl
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,849 @@
import copy
import importlib
import itertools
import math
import pickle
from pathlib import Path

import numpy as np
import pandas as pd
import xarray as xr
from PIL import Image
from pixelmatch.contrib.PIL import pixelmatch
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from tqdm import tqdm

import brainscore
from brainio.assemblies import NeuroidAssembly
from brainscore.benchmarks.screen import place_on_screen
from candidate_models.base_models import cornet
from candidate_models.model_commitments import brain_translated_pool
from model_tools.activations import PytorchWrapper
from model_tools.brain_transformation import ModelCommitment


SPLIT_NUMBER = 100
MAX_NUM_NEURONS = 71
HVM_TEST_IMAGES_NUM = 30
OOD_TEST_IMAGES_NUM = 30

CATEGORIES = ['apple', 'bear', 'bird', 'car', 'chair', 'dog', 'elephant', 'face', 'plane', 'zebra']
SILHOUETTE_DOMAINS = ['convex_hull', 'outline', 'skeleton', 'silhouette']

List_all_models = ['resnext101_32x16d_wsl', 'resnext101_32x32d_wsl', 'resnext101_32x48d_wsl', 'resnext101_32x8d_wsl']


############################################################
# Loading functions: brain model specific
############################################################

def get_brainmodel(identifier, penultimate_layer: bool = False):
    '''
    Load the brain model from the correct source.

    Arguments:
        identifier: architecture name of the brain model
        penultimate_layer: boolean (True: take the penultimate layer, False: take the IT layer of the model)
    Returns:
        brain_model: brain model scaffold
    '''
    if identifier in ['resnet50-barlow', 'custom_model_cv_18_dagger_408', 'efficientnet-b6', 'ViT_L_32_imagenet1k', 'ViT_L_16_imagenet1k', 'r3m_resnet34', 'r3m_resnet50']:
        identifier_package_mapping = {'resnet50-barlow': 'resnet_selfsup_submission', 'custom_model_cv_18_dagger_408': 'crossvit_18_dagger_408_finetuned',
                                      'efficientnet-b6': 'efficientnet_models', 'ViT_L_32_imagenet1k': 'ViT', 'ViT_L_16_imagenet1k': 'ViT',
                                      'r3m_resnet34': 'r3m_main', 'r3m_resnet50': 'r3m_main'}
        packagename = identifier_package_mapping[identifier]
        module = importlib.import_module(f"{packagename}.models.base_models")
        get_submission_model = getattr(module, "get_model")
        get_submission_layers = getattr(module, "get_layers")
        basemodel = get_submission_model(identifier)
        layers = get_submission_layers(identifier)
        brain_model = ModelCommitment(identifier=identifier, activations_model=basemodel, layers=layers)
        return brain_model

    if identifier == 'CORnet-S' and penultimate_layer:  # TODO: does this work?
        # Only do this when choosing the penultimate layer, *not* when choosing the IT layer
        basemodel = cornet(identifier)
        basemodel = PytorchWrapper(model=basemodel._model, preprocessing=basemodel._extractor.preprocess)
        brain_model = ModelCommitment(identifier=identifier, activations_model=basemodel, layers=['decoder.avgpool'])
        return brain_model
    brain_model = brain_translated_pool[identifier]
    return brain_model


def retrieve_activations_from_brainmodel(brain_model, image_source: str, penultimate_layer) -> NeuroidAssembly:
    '''
    Returns an xarray DataArray with two dimensions, presentation and neuroid, plus the layer metadata on the neuroid dimension.

    Arguments:
        brain_model: brain model to record from
        image_source: identifier of the stimulus set used to activate the model
        penultimate_layer: either None if the IT layer is chosen, or the name of the desired penultimate layer
    Returns:
        activations (NeuroidAssembly xarray): brain model activations for the desired image source and layer
    '''
    # Get stimulus set for images
    stimulus_set = brainscore.get_stimulus_set(image_source)
    # Reshape images for brain model
    stimset = place_on_screen(stimulus_set, brain_model.visual_degrees(), 8)
    if penultimate_layer is not None:
        # Define the "recording area" in the brain model
        brain_model.layer_model.region_layer_map['IT'] = penultimate_layer
    brain_model.start_recording('IT', time_bins=[(70, 170)])
    # Activate the brain model with the given image dataset
    activations = brain_model.look_at(stimset)
    # Reduce to a 2d array by dropping the singleton time_bin dimension
    activations = activations.squeeze()
    # Reorder the dimensions
    activations = activations.transpose('presentation', 'neuroid')

    if image_source == 'dicarlo.domain_transfer':
        # Delete unwanted sources
        activations = activations.where(activations.stimulus_source != 'GeirhosOOD', drop=True)
        activations = activations.where(activations.stimulus_source != 'CueConflict', drop=True)
        activations = activations.where(activations.stimulus_source != 'ObjectNet', drop=True)

    return activations


def get_brain_model_activation(brain_model_name, image_source, penultimate_layer_boolean=False):
    '''
    Activates the brain model with the respective image source and layer.

    Arguments:
        brain_model_name: architecture name of the brain model
        image_source: identifier of the stimulus set used to activate the model
        penultimate_layer_boolean: boolean (True: take the penultimate layer, False: take the IT layer of the model)
    Returns:
        brain_model_activations (NeuroidAssembly xarray): brain model activations for the desired image source and layer
    '''
    # Load the brain model
    brain_model = get_brainmodel(brain_model_name, penultimate_layer_boolean)
    # Activate the desired layer
    if penultimate_layer_boolean:
        penultimate_layer = brain_model.layers[-1]
        brain_model_activations = retrieve_activations_from_brainmodel(brain_model, image_source, penultimate_layer)
    else:
        brain_model_activations = retrieve_activations_from_brainmodel(brain_model, image_source, penultimate_layer=None)
    return brain_model_activations


def loading_brain_model_activation(brain_model_name, image_source, penultimate_layer):  # TODO: correct?
    '''
    Loads the brain model activation and adds background ids to each of the HVM-like (Silhouette) images.

    Arguments:
        brain_model_name: architecture name of the brain model
        image_source: identifier of the stimulus set used to activate the model
        penultimate_layer: boolean (True: take the penultimate layer, False: take the IT layer of the model)
    Returns:
        domain_transfer_data: full data with background ids assigned
    '''
    brain_model_activation = get_brain_model_activation(brain_model_name, image_source, penultimate_layer)
    hvm_data, rest_data, non_silhouette_data = load_silhouette_data(data=brain_model_activation)
    domain_transfer_data = create_background_ids(hvm_data, rest_data, non_silhouette_data)
    return domain_transfer_data


############################################################
############################################################
############################################################


def get_brain_model_performance(brain_model_name: str, image_source: str, estimator, image_arry, penultimate_layer: bool = False, split_num: int = SPLIT_NUMBER):
    '''
    Computes brain model performance over the full image and neuron ranges and saves it as pandas dataframes. Performance is averaged over split_num splits.

    Arguments:
        brain_model_name: architecture name of the brain model
        image_source: identifier of the stimulus set used to activate the model
        estimator: classifier for the decoder
        image_arry: array with the numbers of training images
        penultimate_layer: boolean (True: take the penultimate layer, False: take the IT layer of the model)
        split_num: number of splits to average over

    Returns:
        Saves a pandas dataframe for each split and the performance averaged over all splits.
        Split dataframe columns: #Neurons, #Images training, Accuracy test data
        Averaged dataframe columns: #Neurons, #Images training, Accuracy test data, Std test data
    '''
    brain_model_activations = loading_brain_model_activation(brain_model_name, image_source, penultimate_layer)
    # Calculate performance
    get_performance_splits_and_average(brain_model_activations=brain_model_activations, num_images_arry=image_arry, num_splits=split_num,
                                       estimator=estimator, brain_model_name=brain_model_name)
    # get_performance_splits_and_average_single_image(brain_model_activations=brain_model_activations, num_images=MAX_NUM_IMAGES, num_splits=SPLIT_NUMBER,
    #                                                 estimator=estimator, brain_model_name=brain_model_name, num_primate_it_neurons_scaling_factor_matching=NEURONS)
    print(f'{brain_model_name} brain model performance was saved')


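# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the entry point above might be driven, assuming a
# RidgeClassifierCV decoder. The model name, stimulus-set identifier and the
# image grid are placeholder values chosen for illustration only.
def _example_run_performance_sweep():
    from sklearn.linear_model import RidgeClassifierCV
    estimator = RidgeClassifierCV(alphas=np.logspace(-3, 3, 7))
    training_image_grid = np.array([10, 50, 100, 200])  # hypothetical training-set sizes
    get_brain_model_performance(brain_model_name='CORnet-S', image_source='dicarlo.domain_transfer',
                                estimator=estimator, image_arry=training_image_grid,
                                penultimate_layer=False, split_num=SPLIT_NUMBER)

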
#################################################
#################################################
#################################################
#################################################
# Functions overlapping with hvm_crossdomain
#################################################

def create_background_ids(hvm_data, rest_data, non_silhouette_data):
    '''
    Assigns each HVM-like image its respective background id, based on its matching HVM image.

    Arguments:
        hvm_data: HVM data
        rest_data: HVM-like (Silhouette) data
        non_silhouette_data: non-HVM-like (non-Silhouette) data

    Returns:
        domain_transfer_data: full data with an additional coordinate, background_id, which indicates the matching background between HVM and HVM-like images
    '''
    # Add background_ids to the hvm images
    hvm_data = hvm_data.assign_coords(background_id=('presentation', np.arange(1, 121)))
    non_silhouette_data = non_silhouette_data.assign_coords(
        background_id=('presentation', np.zeros(len(non_silhouette_data))))

    # Loop through each category to find the respective images
    for category in tqdm(CATEGORIES, desc='looping categories'):
        hvm_category = hvm_data[hvm_data['object_label'] == category]
        oods_category = rest_data[rest_data['object_label'] == category]
        # Find the matching backgrounds in the hvm images
        background_ids = find_matching_background(oods_category, hvm_category)
        # Store the background ids in the NeuronRecordingAssembly
        oods_category = oods_category.assign_coords(background_id=('presentation', background_ids))
        category_data = xr.concat((hvm_category, oods_category), dim='presentation')
        # Concatenate all categories together
        if category == CATEGORIES[0]:
            full_data = copy.copy(category_data)
        else:
            full_data = xr.concat((full_data, category_data), dim='presentation')
    domain_transfer_data = xr.concat((full_data, non_silhouette_data), dim='presentation')

    return domain_transfer_data


def find_matching_background(oods_category, hvm_category):
    '''
    HVM and HVM-like images share the same background. To identify shared backgrounds, images are compared pixel-wise to each other.
    An HVM-like image is labeled with the background id of the first HVM image whose pixel mismatch falls below the threshold.

    Arguments:
        oods_category (NeuronRecordingAssembly): all images from one single HVM-like domain, without background ids
        hvm_category (NeuronRecordingAssembly): HVM images with background ids

    Returns:
        background_ids: list of matching background ids for the single HVM-like domain
    '''
    background_ids = []
    # Find the respective background id from the hvm images for each OOD image
    data_path = Path(__file__).parent / 'Sanghavi-domain_transfer-data/image_dicarlo_domain_transfer'
    oods_category_image_file_path = oods_category.filename
    hvm_image_file_path = hvm_category.filename

    for ood_image in tqdm(oods_category_image_file_path, desc='looping images'):
        image_filename = ood_image.item()
        image_path = str(data_path / image_filename)
        image_ood = Image.open(image_path)

        for hvm_image, hvm_background_id in zip(hvm_image_file_path, hvm_category.background_id):
            image_filename = hvm_image.item()
            image_path = str(data_path / image_filename)
            image_hvm = Image.open(image_path)
            mismatch = pixelmatch(image_hvm, image_ood)
            if mismatch <= 20000:
                background_ids.append(hvm_background_id.item())
                break
    return background_ids


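# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the pixel comparison used above on two synthetic images; the
# 20000-pixel mismatch threshold mirrors the heuristic in
# find_matching_background and is not a calibrated value.
def _example_pixel_mismatch():
    from PIL import ImageDraw
    background = Image.new('RGB', (256, 256), color=(128, 128, 128))
    variant = background.copy()
    ImageDraw.Draw(variant).rectangle((100, 100, 140, 140), fill=(255, 0, 0))
    mismatch = pixelmatch(background, variant)  # number of differing pixels
    print(f'{mismatch} differing pixels; same background: {mismatch <= 20000}')

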
def load_silhouette_data(data):
    '''
    Separates the domain-transfer data into hvm, hvm-like (silhouette) and rest (non-silhouette) data. This separation is needed to give each hvm-like
    image the same background number as its respective hvm version (the images share the same background).

    Arguments:
        data: full data that is going to be split into hvm, hvm-like (silhouette) and rest (non-silhouette) data

    Returns:
        hvm_data (NeuronRecordingAssembly): hvm data
        rest_data (NeuronRecordingAssembly): hvm-like data
        non_silhouette_style_data (NeuronRecordingAssembly): rest data
    '''
    try:
        silhouette_style_data = data[data['identifier'] == 'Silhouette']
        non_silhouette_style_data = data[data['identifier'] != 'Silhouette']
    except KeyError:
        silhouette_style_data = data[data['stimulus_source'] == 'Silhouette']
        non_silhouette_style_data = data[data['stimulus_source'] != 'Silhouette']
    hvm_data = silhouette_style_data[silhouette_style_data['object_style'] == 'original']
    rest_data = silhouette_style_data[silhouette_style_data['object_style'] != 'original']

    return hvm_data, rest_data, non_silhouette_style_data


def get_single_domain_data(data, image_source_in_domain, object_style_in_domain):
    '''
    Filters the data for a single domain.

    Arguments:
        data: NeuronRecordingAssembly xarray
        image_source_in_domain (str): image source of the wanted domain
        object_style_in_domain (str): image style of the wanted domain

    Returns:
        domain_data: data for the single domain
    '''
    # Get domain data
    if image_source_in_domain in ['Art', 'Silhouette']:
        try:
            domain_data = data.where((data.identifier == image_source_in_domain) & (data.object_style == object_style_in_domain), drop=True)
        except AttributeError:
            domain_data = data.where((data.stimulus_source == image_source_in_domain) & (data.object_style == object_style_in_domain), drop=True)

    else:
        try:
            domain_data = data.where(data.identifier == image_source_in_domain, drop=True)
        except AttributeError:
            domain_data = data.where(data.stimulus_source == image_source_in_domain, drop=True)

    return domain_data


def get_crossdomain_data_dictionary(domain_transfer_data):
    '''
    Creates a dictionary with each crossdomain as key and its data as value.

    Arguments:
        domain_transfer_data (NeuronRecordingAssembly): complete dataset

    Returns:
        dictionary with each crossdomain as key and its data as value
    '''
    # Create dictionary
    crossdomain_data_dict = {}
    crossdomains = ['original', 'cartoon', 'line_drawing', 'mosaic', 'painting', 'sketch', 'convex_hull', 'outline', 'skeleton', 'silhouette', 'cococolor', 'cocogray', 'tdw']
    crossdomain_image_source = ['Silhouette', 'Art', 'Art', 'Art', 'Art', 'Art', 'Silhouette', 'Silhouette', 'Silhouette', 'Silhouette', 'COCOColor', 'COCOGray', 'TDW']
    for image_source, object_style in zip(crossdomain_image_source, crossdomains):
        crossdomain_data = get_single_domain_data(data=domain_transfer_data, image_source_in_domain=image_source, object_style_in_domain=object_style)
        if object_style == 'original':
            crossdomain_data_dict['hvm'] = crossdomain_data
        else:
            crossdomain_data_dict[object_style] = crossdomain_data

    return crossdomain_data_dict


def get_crossdomain_dataframes(single_neuron_image=False):
    '''
    Creates a dictionary with each crossdomain as key and an empty dataframe as value.

    Arguments:
        single_neuron_image: boolean (True: add an additional column with the split number, False: no additional column)

    Returns:
        dictionary with each crossdomain as key and an empty dataframe as value. Columns are #Neurons, #Images training, Accuracy test data
    '''
    dataframe_dict = {}
    # Create dataframe
    if not single_neuron_image:
        df = pd.DataFrame(columns=['#Neurons', '#Images training', 'Accuracy test data'])
    else:
        df = pd.DataFrame(columns=['#Neurons', '#Images training', 'Accuracy test data', 'Split number'])

    crossdomains = ['hvm', 'cartoon', 'line_drawing', 'mosaic', 'painting', 'sketch', 'convex_hull', 'outline', 'skeleton', 'silhouette', 'cococolor', 'cocogray', 'tdw']
    for crossdomain in crossdomains:
        dataframe_dict[crossdomain] = copy.copy(df)
    return dataframe_dict


def split_training_test_images(crossdomain_data_dictionary):
    '''
    Splits the data into a training data pool and test images, making sure that the background ids of testing hvm images and training non-hvm images are not identical.

    Arguments:
        crossdomain_data_dictionary (dict): dictionary with each crossdomain as key and the respective NeuronRecordingAssembly as value

    Returns:
        crossdomain_test_data_dictionary (dict): dictionary with each crossdomain as key and the respective test images (NeuronRecordingAssembly) as value
        training_images (NeuronRecordingAssembly): training image pool, containing only HVM images
    '''
    # Create crossdomain testing images dictionary
    crossdomain_test_data_dictionary = {}
    # Loop through each crossdomain and draw a random subset of images for testing
    for crossdomain in crossdomain_data_dictionary.keys():
        crossdomain_data = crossdomain_data_dictionary[crossdomain]
        if crossdomain == 'hvm':
            test_images, training_images = reduce_data_num_images(data_complete=crossdomain_data, number_images=HVM_TEST_IMAGES_NUM)
            background_ids_silhouette_img = test_images.background_id.values

        elif crossdomain in SILHOUETTE_DOMAINS:
            # Silhouette domains reuse the backgrounds of the hvm test images
            test_indices = np.where(np.in1d(crossdomain_data.background_id, background_ids_silhouette_img))
            test_images = crossdomain_data[test_indices]
        else:
            test_images, _ = reduce_data_num_images(data_complete=crossdomain_data, number_images=OOD_TEST_IMAGES_NUM)
        crossdomain_test_data_dictionary[crossdomain] = test_images

    return crossdomain_test_data_dictionary, training_images


def reduce_data_num_images(data_complete, number_images):
    '''
    Draws a randomly seeded subset of the data while making sure that each object category is represented equally.

    Arguments:
        data_complete (NeuronRecordingAssembly): complete dataset
        number_images (int): number of images to draw

    Returns:
        stratified_training_data (NeuronRecordingAssembly): drawn data with an equal number of images per object category
        rest_data (NeuronRecordingAssembly): remaining data (complete data minus drawn data)
    '''
    if number_images == len(data_complete):
        place_holder = None
        return data_complete, place_holder
    else:
        try:
            stratified_training_data, rest_data = train_test_split(data_complete, train_size=number_images, stratify=data_complete.object_label)
        except AttributeError:
            stratified_training_data, rest_data = train_test_split(data_complete, train_size=number_images, stratify=data_complete.category_name)

        return stratified_training_data, rest_data


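# --- Illustrative sketch (not part of the original module) ---
# Shows the stratified draw used above on toy labels: train_test_split with
# stratify= keeps the per-category proportions intact in both halves.
def _example_stratified_draw():
    labels = np.repeat(['apple', 'bear', 'bird'], 10)  # 10 samples per category
    samples = np.arange(len(labels)).reshape(-1, 1)    # stand-in feature column
    train, rest = train_test_split(samples, train_size=15, stratify=labels)
    print(len(train), len(rest))                       # 15 15, with 5 of each category in train

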
def get_final_training_data(complete_training_data, num_images_training, num_neurons):
    '''
    Draws the final training images and neurons for one split.

    Arguments:
        complete_training_data (NeuronRecordingAssembly): complete training data pool for one split
        num_images_training: desired number of training images
        num_neurons: desired number of training neurons

    Returns:
        final_training_data (NeuronRecordingAssembly): final training data for this split
        neuron_indices: training neuron indices (used to align with the neurons in the testing data)
    '''
    # Draw a random subset of images from the training data
    final_training_images, _ = reduce_data_num_images(data_complete=complete_training_data, number_images=num_images_training)
    # Draw a random subset of neurons
    final_training_data, neuron_indices = reduce_data_num_neurons(data=final_training_images, num_neurons=num_neurons)
    return final_training_data, neuron_indices


def reduce_data_num_neurons(data, num_neurons):
    '''
    Reduces the number of neurons in the data by randomly drawing neuron ids from the complete dataset.

    Arguments:
        data (NeuronRecordingAssembly): complete dataset
        num_neurons (int): number of neurons that the data should be reduced to

    Returns:
        reduced_neurons_num_data: data with the reduced number of neurons
        random_indices_neurons: indices of the neurons in reduced_neurons_num_data
    '''
    # Draw random neuron indices
    random_indices_neurons = np.random.choice(len(data.neuroid), num_neurons, replace=False)
    # Select only the randomly chosen neurons for the training and testing data
    reduced_neurons_num_data = data[:, random_indices_neurons]
    return reduced_neurons_num_data, random_indices_neurons


def get_decoder(data, estimator):
    '''
    Trains the decoder.

    Arguments:
        data (NeuronRecordingAssembly): training data
        estimator (sklearn classifier): estimator, e.g. RidgeClassifierCV, ElasticNetCV etc.

    Returns:
        clf: trained decoder
    '''
    # Get input & output data
    X = data.data
    try:
        y = data.object_label.data
    except AttributeError:
        y = data.category_name.data

    # Get a fresh copy of the estimator
    clf = copy.copy(estimator)

    try:
        clf.fit(X, y)
    except ValueError:
        # Some estimators require binarized labels
        binary_label = preprocessing.LabelBinarizer()
        y = binary_label.fit_transform(y)
        clf.fit(X, y)

    return clf


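# --- Illustrative sketch (not part of the original module) ---
# Trains the same kind of decoder on synthetic numbers standing in for a
# (presentation x neuroid) activation matrix.
def _example_train_decoder():
    from sklearn.linear_model import RidgeClassifierCV
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 71))        # 100 presentations, 71 "neurons"
    y = rng.choice(CATEGORIES, size=100)  # random category labels
    clf = RidgeClassifierCV(alphas=np.logspace(-3, 3, 7)).fit(X, y)
    print(clf.decision_function(X).shape)  # (100, number of distinct classes)

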
def get_final_testing_data(crossdomain_test_images_dictionary, neuron_indices):
    '''
    Reduces the testing data to the correct (number of) neurons.

    Arguments:
        crossdomain_test_images_dictionary (dict of NeuronRecordingAssembly): keys: domain names, values: test data
        neuron_indices: indices of the desired neurons

    Returns:
        crossdomain_test_images_dictionary_final (dict of NeuronRecordingAssembly): keys: domain names, values: final test data with the correct neurons
    '''
    crossdomain_test_images_dictionary_final = {}
    for crossdomain in crossdomain_test_images_dictionary.keys():
        crossdomain_test_images_dictionary_final[crossdomain] = crossdomain_test_images_dictionary[crossdomain][:, neuron_indices]
    return crossdomain_test_images_dictionary_final


def add_accuracies_to_split_df(final_test_data_dictionary, decoder, split_dataframe, num_neurons, num_training_images):
    '''
    Fills the split dataframe with the decoder performance and the numbers of training images and neurons that were used in this split.

    Arguments:
        final_test_data_dictionary (dict of NeuronRecordingAssembly): keys: domain names, values: final test data with the correct neurons
        decoder: trained decoder
        split_dataframe (dict): keys: domain names, values: dataframes with columns #Neurons, #Images training, Accuracy test data
        num_neurons: number of training neurons
        num_training_images: number of training images

    Returns:
        split_dataframe (dict): keys: domain names, values: dataframes with columns #Neurons, #Images training, Accuracy test data
    '''
    # Get and store the test accuracy for each crossdomain
    for crossdomain in final_test_data_dictionary.keys():
        test_accuracy = get_classifier_score_2AFC(classifier=decoder, data=final_test_data_dictionary[crossdomain])
        crossdomain_df = split_dataframe[crossdomain]
        # Fill dataframe
        new_row = pd.DataFrame([{
            '#Neurons': num_neurons,
            '#Images training': num_training_images,
            'Accuracy test data': test_accuracy
        }])
        crossdomain_df = pd.concat([crossdomain_df, new_row], ignore_index=True)
        split_dataframe[crossdomain] = crossdomain_df

    return split_dataframe


def get_classifier_score_2AFC(classifier, data):
    '''
    Calculates the 2AFC score: for each test stimulus, the fraction of incorrect categories whose decision value
    falls below that of the true category, averaged over all stimuli.

    Arguments:
        classifier: pre-trained classifier
        data (NeuronRecordingAssembly): test data

    Returns:
        avg_score: 2AFC score
    '''
    # Get input & output data
    X = data.data
    try:
        y = data.object_label.data
    except AttributeError:
        y = data.category_name.data

    categories = np.unique(y)
    number_of_categories = len(categories)
    predict_probs = classifier.decision_function(X)
    scores = np.zeros(len(y))
    indices_row = np.arange(len(y))
    indices_column = np.arange(len(categories))

    for indx in indices_row:
        category_index = np.where(categories == y[indx])
        correct_comparisons = 0
        indx_column = np.delete(indices_column, category_index)
        for idx in indx_column:
            if predict_probs[indx, category_index] > predict_probs[indx, idx]:
                correct_comparisons += 1

        score = correct_comparisons / (number_of_categories - 1)
        scores[indx] = score

    avg_score = np.mean(scores)
    return avg_score


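# --- Illustrative sketch (not part of the original module) ---
# Worked toy example of the 2AFC score above: 3 categories, 3 stimuli, and a
# stand-in classifier that exposes the same decision_function interface.
def _example_2afc_by_hand():
    from types import SimpleNamespace

    class _FixedDecoder:
        # Rows: stimuli; columns: categories in np.unique order ('a', 'b', 'c')
        def decision_function(self, X):
            return np.array([[0.9, 0.2, 0.1],    # true 'a' beats 'b' and 'c' -> 2/2
                             [0.4, 0.3, 0.5],    # true 'b' beats neither     -> 0/2
                             [0.1, 0.2, 0.6]])   # true 'c' beats 'a' and 'b' -> 2/2
    toy = SimpleNamespace(data=np.zeros((3, 5)),  # features are ignored by _FixedDecoder
                          object_label=SimpleNamespace(data=np.array(['a', 'b', 'c'])))
    print(get_classifier_score_2AFC(_FixedDecoder(), toy))  # (1.0 + 0.0 + 1.0) / 3 ~ 0.667

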
#################################################
#################################################
#################################################
#################################################
#################################################
# Brain model specific functions
#################################################


def get_performance_splits_and_average(brain_model_activations, num_images_arry, num_splits, estimator, brain_model_name):
    '''Saves the performance dataframes for each crossdomain and split, plus the performance averaged over all splits.'''

    # Check the dimensionality of the NeuroidAssembly
    assert set(brain_model_activations.dims) == {'presentation', 'neuroid'}
    # Ensure reproducibility
    np.random.seed(42)
    # Load data: get a dictionary with all crossdomain data
    crossdomain_data_dict = get_crossdomain_data_dictionary(domain_transfer_data=brain_model_activations)
    # Load dataframes for each crossdomain
    crossdomain_dataframes = get_crossdomain_dataframes()
    # Get the correct neuron array for each brain model
    num_neurons_arry = create_power_of_two_array_neurons(brain_model_activations=brain_model_activations)  # TODO: revisit whether to use the fixed array below instead
    # num_neurons_arry = np.asarray((1, 3, 5, 10, 20, 30, 40, 50, 71))

    # Loop through the splits
    for split in np.arange(num_splits):
        # Create a new dataframe in each split and save it
        split_crossdomain_dataframes = get_crossdomain_dataframes()
        # Draw new test images for each split; the test images stay fixed within one split over all images x neurons rounds
        crossdomain_test_images_dict, complete_training_data = split_training_test_images(crossdomain_data_dictionary=crossdomain_data_dict)  # TODO: test if the background ids are identical for all Silhouette images in the test data

        # Loop through the numbers of neurons and training images
        for num_neurons, num_images_train in tqdm(itertools.product(num_neurons_arry, num_images_arry), desc='Neuron & image round'):
            # Sample the final training data with the right number of neurons & images
            final_training_data, neuron_indices = get_final_training_data(complete_training_data=complete_training_data, num_images_training=num_images_train,
                                                                          num_neurons=num_neurons)
            # Train the decoder
            split_decoder = get_decoder(data=final_training_data, estimator=estimator)

            # Get the final testing data with the correct number of neurons
            final_test_data_dict = get_final_testing_data(crossdomain_test_images_dictionary=crossdomain_test_images_dict, neuron_indices=neuron_indices)
            # Get the test accuracy and store it in the split dataframe
            split_crossdomain_dataframes = add_accuracies_to_split_df(final_test_data_dictionary=final_test_data_dict, decoder=split_decoder,
                                                                      split_dataframe=split_crossdomain_dataframes, num_neurons=num_neurons, num_training_images=num_images_train)
        # TODO: correct number of training/test images?
        crossdomain_dataframes = save_split_dataframes(split_crossdomain_dataframes=split_crossdomain_dataframes, crossdomain_dataframes=crossdomain_dataframes, split=split,
                                                       brain_model_name=brain_model_name)

    save_split_averaged_dataframes(crossdomain_dataframes=crossdomain_dataframes, neurons_array=num_neurons_arry, images_array=num_images_arry, brain_model_name=brain_model_name)


########################
# Other functions that are brain model specific
########################
def create_power_of_two_array_neurons(brain_model_activations):
    max_number = len(brain_model_activations.neuroid)
    # Get the exponent of the largest power of 2 that still fits, rounding down
    exponent = math.floor(np.log2(max_number))
    # Create a power-of-two array up to max_number
    power_of_two_array = 2 ** np.arange(exponent + 1)

    # Add the maximum number of neurons to the array
    if power_of_two_array[-1] != max_number:
        power_of_two_array = np.append(power_of_two_array, max_number)

    return power_of_two_array


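# --- Illustrative sketch (not part of the original module) ---
# The neuron sweep above, for a population of 71 units (MAX_NUM_NEURONS of the
# primate data), is the powers of two up to 64 plus the full population.
def _example_neuron_sweep():
    max_number = MAX_NUM_NEURONS                # 71
    exponent = math.floor(np.log2(max_number))  # 6
    sweep = np.append(2 ** np.arange(exponent + 1), max_number)
    print(sweep)                                # [ 1  2  4  8 16 32 64 71]

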
############################################################
# Saving functions
############################################################
def save_dataframe(dataframe, csv_dataframe_name):
    savepath = Path(__file__).parent / 'dataframes_new_models' / csv_dataframe_name  # TODO: undo folder name
    dataframe.to_csv(savepath)
    print(f"Saved to {savepath}")


def save_dictionary(dictionary, pkl_filename):
    with open(pkl_filename, 'wb') as file:
        # A new file will be created
        pickle.dump(dictionary, file)


def open_pkl(filename_pkl):
    with open(filename_pkl, 'rb') as f:
        dictionary = pickle.load(f)
    return dictionary


####################################
# Brain model specific saving functions
####################################

def save_split_dataframes(split_crossdomain_dataframes, crossdomain_dataframes, split, brain_model_name, primate_it_num_neurons=None):
    '''
    Concatenates each split dataframe onto a single accumulating dataframe (one per domain, over all splits) and saves the current split dataframe.

    Arguments:
        split_crossdomain_dataframes (dict): keys: domain names, values: dataframes with the performance for each #Neurons x #Images combination
        crossdomain_dataframes (dict): keys: domain names, values: dataframes with the performance for each #Neurons x #Images combination accumulated over multiple splits
        split: number of the split
        brain_model_name: name of the brain model
        primate_it_num_neurons: optional neuron-matching count, only used by the commented-out filename variants
    Returns:
        Saves the split dataframe for each domain.
        crossdomain_dataframes (dict): keys: domain names, values: dataframes with the performance for each #Neurons x #Images combination accumulated over multiple splits
    '''
    for crossdomain in split_crossdomain_dataframes.keys():
        crossdomain_dataframes[crossdomain] = pd.concat([crossdomain_dataframes[crossdomain], split_crossdomain_dataframes[crossdomain]], ignore_index=True)
        # save_dataframe(dataframe=split_crossdomain_dataframes[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_split_{split}_num_neurons_primate.csv')
        # save_dataframe(dataframe=split_crossdomain_dataframes[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_split_{split}_penultimate_layer.csv')
        # save_dataframe(dataframe=split_crossdomain_dataframes[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_split_{split}_penultimate_layer_multiple_neurons_{primate_it_num_neurons}_neuron_match.csv')
        save_dataframe(dataframe=split_crossdomain_dataframes[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_split_{split}_it_layer.csv')

    return crossdomain_dataframes


def save_split_averaged_dataframes(crossdomain_dataframes, neurons_array, images_array, brain_model_name):
    '''
    Saves, for each domain, a dataframe with the performance averaged over multiple splits.

    Arguments:
        crossdomain_dataframes (dict): keys: domain names, values: dataframes with the performance for each #Neurons x #Images combination accumulated over multiple splits
        neurons_array: array with the numbers of training neurons used over all splits
        images_array: array with the numbers of training images used over all splits
        brain_model_name: name of the brain model
    Returns:
        Saves the averaged performance dataframe for each domain.
    '''
    # Average over all splits; get the mean and standard deviation.
    # Assumes neurons_array and images_array are sorted ascending, matching groupby's sort order.
    crossdomain_dataframes_averaged = get_crossdomain_dataframes()
    for crossdomain in crossdomain_dataframes.keys():
        crossdomain_dataframes_averaged[crossdomain]['#Neurons'] = np.repeat(neurons_array, len(images_array))
        crossdomain_dataframes_averaged[crossdomain]['#Images training'] = np.tile(images_array, len(neurons_array))
        crossdomain_dataframes_averaged[crossdomain]['Accuracy test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).mean().values
        crossdomain_dataframes_averaged[crossdomain]['Std test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).std().values
        # save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_num_neurons_primate.csv')
        # save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_penultimate_layer.csv')
        # save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_penultimate_layer_multiple_neurons.csv')
        save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_it_layer.csv')


##############################################################
##############################################################
##############################################################
##############################################################
##############################################################
##############################################################
##############################################################
##############################################################


def get_neuron_array_for_single_img(brain_model_name, brain_model_activation, primate_it_number_of_neurons):
    brain_model_scaling_factor = get_scaling_factor_num_neurons(brain_model=brain_model_name, primate_it_num_neurons=primate_it_number_of_neurons)
    scaling_factor_multiplier = brain_model_scaling_factor / primate_it_number_of_neurons
    neuron_arry = np.asarray((10, 20, 30, 40, 50, 71))
    num_neurons_arry = neuron_arry * scaling_factor_multiplier
    # Round every entry up to an integer number of units
    round_up = np.vectorize(math.ceil)
    num_neurons_arry = round_up(num_neurons_arry)
    max_num_neurons_brain_model = len(brain_model_activation.neuroid)
    if num_neurons_arry[-1] > max_num_neurons_brain_model:
        num_neurons_arry[-1] = max_num_neurons_brain_model
    elif num_neurons_arry[-1] < max_num_neurons_brain_model:
        # Only append the full population if it is not already the last entry
        num_neurons_arry = np.append(num_neurons_arry, max_num_neurons_brain_model)

    return num_neurons_arry


def get_performance_splits_and_average_single_image(brain_model_activations, num_images, num_splits, estimator, brain_model_name, num_primate_it_neurons_scaling_factor_matching):
    '''Saves the performance dataframes for each crossdomain and split for a single training-image count, plus the performance averaged over all splits.'''

    # Check the dimensionality of the NeuroidAssembly
    assert set(brain_model_activations.dims) == {'presentation', 'neuroid'}

    # Load data: get a dictionary with all crossdomain data
    crossdomain_data_dict = get_crossdomain_data_dictionary(brain_model_activations)
    # Load dataframes for each crossdomain
    crossdomain_dataframes = get_crossdomain_dataframes()
    # Get the correct neuron array for each brain model
    num_neurons_arry = get_neuron_array_for_single_img(brain_model_name, brain_model_activations, primate_it_number_of_neurons=num_primate_it_neurons_scaling_factor_matching)

    # Loop through the splits
    for split in np.arange(num_splits):
        # Create a new dataframe in each split and save it
        split_crossdomain_dataframes = get_crossdomain_dataframes()
        # Draw new test images for each split; the test images stay fixed within one split over all images x neurons rounds
        crossdomain_test_images_dict, complete_training_data = split_training_test_images(crossdomain_data_dictionary=crossdomain_data_dict)

        # Loop through the numbers of neurons
        for num_neurons in tqdm(num_neurons_arry, desc='Neurons'):
            # Round the number of units up
            num_neurons = math.ceil(num_neurons)
            # Sample the final training data with the right number of neurons & images
            final_training_data, neuron_indices = get_final_training_data(complete_training_data=complete_training_data, num_images_training=num_images,
                                                                          num_neurons=num_neurons)
            # Train the decoder
            split_decoder = get_decoder(data=final_training_data, estimator=estimator)
            # Get the final testing data with the correct number of neurons
            final_test_data_dict = get_final_testing_data(crossdomain_test_images_dictionary=crossdomain_test_images_dict, neuron_indices=neuron_indices)
            # Get the test accuracy and store it in the split dataframe
            split_crossdomain_dataframes = add_accuracies_to_split_df(final_test_data_dictionary=final_test_data_dict, decoder=split_decoder,
                                                                      split_dataframe=split_crossdomain_dataframes, num_neurons=num_neurons, num_training_images=num_images)

        crossdomain_dataframes = save_split_dataframes(split_crossdomain_dataframes=split_crossdomain_dataframes, crossdomain_dataframes=crossdomain_dataframes, split=split,
                                                       brain_model_name=brain_model_name, primate_it_num_neurons=num_primate_it_neurons_scaling_factor_matching)

    save_split_averaged_dataframes_single_image(crossdomain_dataframes=crossdomain_dataframes, neurons_array=num_neurons_arry, image_num=num_images, brain_model_name=brain_model_name, primate_it_num_neurons=num_primate_it_neurons_scaling_factor_matching)


def get_scaling_factor_num_neurons(brain_model, primate_it_num_neurons):
    if primate_it_num_neurons is None:
        neuron_dict = open_pkl(filename_pkl='Deep_nets_crossdomain_performance_scaling_factors_penultimate_layer.pkl')
    else:
        neuron_dict = open_pkl(filename_pkl=f'Deep_nets_crossdomain_performance_scaling_factors_penultimate_layer_{primate_it_num_neurons}_neuron_match.pkl')
    brain_model_num_neurons = neuron_dict[brain_model]
    return brain_model_num_neurons


def get_performance_splits_and_average_single_neuron_image(brain_model_activations, num_images, num_splits, estimator, brain_model_name, num_primate_it_neurons_for_scaling_factor_match):
    '''Saves, for each crossdomain, a single dataframe with the performance of all splits for one fixed #Neurons x #Images combination.'''
    # Check the dimensionality of the NeuroidAssembly
    assert set(brain_model_activations.dims) == {'presentation', 'neuroid'}

    # Load data: get a dictionary with all crossdomain data
    crossdomain_data_dict = get_crossdomain_data_dictionary(brain_model_activations)
    # Load dataframes for each crossdomain
    crossdomain_dataframes = get_crossdomain_dataframes(single_neuron_image=True)
    # Get the correct number of neurons for each brain model
    num_neurons = get_scaling_factor_num_neurons(brain_model=brain_model_name, primate_it_num_neurons=num_primate_it_neurons_for_scaling_factor_match)

    # Loop through the splits
    for split in np.arange(num_splits):
        # Draw new test images for each split
        crossdomain_test_images_dict, complete_training_data = split_training_test_images(crossdomain_data_dictionary=crossdomain_data_dict)

        # Sample the final training data with the right number of neurons & images
        final_training_data, neuron_indices = get_final_training_data(complete_training_data=complete_training_data, num_images_training=num_images,
                                                                      num_neurons=num_neurons)
        # Train the decoder
        split_decoder = get_decoder(data=final_training_data, estimator=estimator)
        # Get the final testing data with the correct number of neurons
        final_test_data_dict = get_final_testing_data(crossdomain_test_images_dictionary=crossdomain_test_images_dict, neuron_indices=neuron_indices)

        # Get the test accuracy and store it in the dataframe
        crossdomain_dataframes = add_accuracies_to_split_df_single_neuron_image(final_test_data_dictionary=final_test_data_dict, decoder=split_decoder,
                                                                                split_dataframe=crossdomain_dataframes, num_neurons=num_neurons, num_training_images=num_images,
                                                                                split_num=split)

    save_dataframes_single_neuron_image(crossdomain_dataframes=crossdomain_dataframes, brain_model_name=brain_model_name)


def save_dataframes_single_neuron_image(crossdomain_dataframes, brain_model_name):
    for crossdomain in crossdomain_dataframes.keys():
        save_dataframe(dataframe=crossdomain_dataframes[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_scaling_factor_penultimate_layer_all_splits.csv')


def save_split_averaged_dataframes_single_image(crossdomain_dataframes, neurons_array, image_num, brain_model_name, primate_it_num_neurons):
    # Average over all splits; get the mean and standard deviation
    crossdomain_dataframes_averaged = get_crossdomain_dataframes()
    for crossdomain in crossdomain_dataframes.keys():
        crossdomain_dataframes_averaged[crossdomain]['#Neurons'] = neurons_array
        crossdomain_dataframes_averaged[crossdomain]['#Images training'] = np.repeat(image_num, repeats=len(neurons_array))
        crossdomain_dataframes_averaged[crossdomain]['Accuracy test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).mean().values
        crossdomain_dataframes_averaged[crossdomain]['Std test data'] = crossdomain_dataframes[crossdomain].groupby(['#Neurons', '#Images training']).std().values
        save_dataframe(dataframe=crossdomain_dataframes_averaged[crossdomain], csv_dataframe_name=f'Deep_nets_performance_hvm_{crossdomain}_{brain_model_name}_averaged_performance_penultimate_layer_multiple_neurons_{primate_it_num_neurons}_neuron_match.csv')


####################################
# Data handling functions
####################################

def add_accuracies_to_split_df_single_neuron_image(final_test_data_dictionary, decoder, split_dataframe, num_neurons, num_training_images, split_num):
    # Get and store the test accuracy for each crossdomain
    for crossdomain in final_test_data_dictionary.keys():
        test_accuracy = get_classifier_score_2AFC(classifier=decoder, data=final_test_data_dictionary[crossdomain])
        crossdomain_df = split_dataframe[crossdomain]
        # Fill dataframe
        new_row = pd.DataFrame([{
            '#Neurons': num_neurons,
            '#Images training': num_training_images,
            'Accuracy test data': test_accuracy,
            'Split number': split_num
        }])
        crossdomain_df = pd.concat([crossdomain_df, new_row], ignore_index=True)
        split_dataframe[crossdomain] = crossdomain_df

    return split_dataframe
