brainscore-vision 2.1__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,66 @@
|
|
1
|
+
import numpy as np
|
2
|
+
import pytest
|
3
|
+
|
4
|
+
from brainio.assemblies import NeuroidAssembly
|
5
|
+
from brainscore_vision.model_helpers.brain_transformation import TemporalAligned
|
6
|
+
from brainscore_vision.model_interface import BrainModel
|
7
|
+
|
8
|
+
|
9
|
+
class LayerMappedModelMock:
    """Minimal stand-in for a layer-mapped model, exposing only what TemporalAligned needs."""

    def __init__(self, output_temporal=False):
        # attributes that TemporalAligned copies for outside use
        self.region_layer_map = None
        self.activations_model = None
        self.start_task = None
        self.output_temporal = output_temporal

    def start_recording(self, *args, **kwargs):
        # recording setup is irrelevant for this mock
        pass

    def look_at(self, *args, **kwargs):
        # presentation/neuroid coordinates shared by both output variants
        base_coords = {'stimulus_id': ('presentation', ['image1', 'image2']),
                       'object_name': ('presentation', ['number', 'number']),
                       'neuroid_id': ('neuroid', [1, 2, 3]),
                       'region': ('neuroid', ['IT'] * 3)}
        if not self.output_temporal:
            # plain 2-presentation x 3-neuroid assembly without a time dimension
            return NeuroidAssembly([[1, 2, 3], [1, 2, 3]],
                                   coords=base_coords,
                                   dims=['presentation', 'neuroid'])
        # temporal variant: same values replicated over two time bins
        temporal_coords = dict(base_coords,
                               time_bin_start=('time_bin', [0, 150]),
                               time_bin_end=('time_bin', [150, 300]))
        return NeuroidAssembly([[[1, 1], [2, 2], [3, 3]], [[1, 1], [2, 2], [3, 3]]],
                               coords=temporal_coords,
                               dims=['presentation', 'neuroid', 'time_bin'])
|
38
|
+
|
39
|
+
|
40
|
+
class TestTemporalAligned:
    """TemporalAligned should expose recordings over exactly the requested time bins."""

    @pytest.mark.parametrize('output_temporal', [False, True])
    def test_single_timebin(self, output_temporal):
        model = TemporalAligned(layer_model=LayerMappedModelMock(output_temporal))
        model.start_recording(recording_target=BrainModel.RecordingTarget.IT, time_bins=[(70, 170)])
        recordings = model.look_at('dummy')
        # a single requested bin is squeezed out of the dimensions
        assert set(recordings.dims) == {'presentation', 'neuroid'}  # squeezed time-bin

    @pytest.mark.parametrize('output_temporal', [False, True])
    def test_two_timebins(self, output_temporal):
        requested_bins = [(70, 170), (170, 270)]
        layer_model = LayerMappedModelMock(output_temporal)
        model = TemporalAligned(layer_model=layer_model)
        model.start_recording(recording_target=BrainModel.RecordingTarget.IT, time_bins=requested_bins)
        recordings = model.look_at('dummy')
        assert set(recordings.dims) == {'presentation', 'neuroid', 'time_bin'}
        starts, ends = zip(*requested_bins)
        np.testing.assert_array_equal(recordings['time_bin_start'].values, list(starts))
        np.testing.assert_array_equal(recordings['time_bin_end'].values, list(ends))

    @pytest.mark.parametrize('output_temporal', [False, True])
    def test_18_timebins(self, output_temporal):
        # 18 consecutive 10ms bins starting at 70ms
        requested_bins = [(70 + i * 10, 80 + i * 10) for i in range(18)]
        model = TemporalAligned(layer_model=LayerMappedModelMock(output_temporal))
        model.start_recording(recording_target=BrainModel.RecordingTarget.IT, time_bins=requested_bins)
        recordings = model.look_at('dummy')
        assert set(recordings.dims) == {'presentation', 'neuroid', 'time_bin'}
        starts, ends = zip(*requested_bins)
        np.testing.assert_array_equal(recordings['time_bin_start'].values, list(starts))
        np.testing.assert_array_equal(recordings['time_bin_end'].values, list(ends))
|
File without changes
|
File without changes
|
@@ -0,0 +1,96 @@
|
|
1
|
+
import numpy as np
|
2
|
+
import os
|
3
|
+
import pytest
|
4
|
+
|
5
|
+
from brainio.stimuli import StimulusSet
|
6
|
+
from brainscore_vision.model_helpers.activations.temporal.inputs.base import Stimulus
|
7
|
+
from brainscore_vision.model_helpers.activations.temporal.model import ActivationWrapper
|
8
|
+
from brainscore_vision.model_helpers.activations.temporal.core import TemporalInferencer, CausalInferencer
|
9
|
+
from collections import OrderedDict
|
10
|
+
|
11
|
+
|
12
|
+
"""This module tests model_helpers.activations.temporal.core.extractor using the default settings"""
|
13
|
+
|
14
|
+
|
15
|
+
video_paths = [
|
16
|
+
os.path.join(os.path.dirname(__file__), "..", "dots1.mp4"),
|
17
|
+
os.path.join(os.path.dirname(__file__), "..", "dots2.mp4"),
|
18
|
+
]
|
19
|
+
video_durations = [2000, 6000]
|
20
|
+
img_path = os.path.join(os.path.dirname(__file__), "../../activations/rgb.jpg")
|
21
|
+
fps = 1
|
22
|
+
|
23
|
+
|
24
|
+
def get_fake_models(causal=False, **kwargs):
    """Build a dummy ActivationWrapper plus its layer list for extractor tests.

    :param causal: if True, wrap with a CausalInferencer (with a bounded duration);
        otherwise use a TemporalInferencer.
    :param kwargs: forwarded to the inferencer (e.g. batch_padding, time_alignment).
    :return: (wrapper, layers) tuple.
    """
    def transform_video(video):
        # crop to a tiny spatial patch so activations stay small
        frames = video.to_numpy()[:, :12, :12]
        return frames

    class FakeActivationWrapper(ActivationWrapper):
        def __init__(self, **kwargs):
            super().__init__("dummy", transform_video, **kwargs)

        def get_activations(self, inputs, layers):
            # identity "model": every layer's activation is just the stacked inputs
            return OrderedDict((layer, np.stack(inputs)) for layer in layers)

    # fixed the redundant `{**{...}}` wrapper around the dict comprehension
    layer_activation_format = {f'layer{i}': "THWC" for i in range(1, 3)}

    if causal:
        inferencer_cls = CausalInferencer
        kwargs['duration'] = (0, 3000)  # bound the causal context so tests stay fast
    else:
        inferencer_cls = TemporalInferencer
    wrapper = FakeActivationWrapper(inferencer_cls=inferencer_cls, fps=fps,
                                    layer_activation_format=layer_activation_format,
                                    max_workers=1, batch_size=4, **kwargs)
    layers = list(layer_activation_format.keys())
    return wrapper, layers
|
48
|
+
|
49
|
+
|
50
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize(["causal", "padding", "time_align"], [(False, True, "ignore_time"), (True, False, "per_frame_aligned")])
def test_from_video_path(causal, padding, time_align):
    """Extract activations directly from the two video paths (dots1: 2s, dots2: 6s)."""
    # removed the unused `video_names` local; the extractor works on paths directly
    stimuli_paths = video_paths

    activations_extractor, layers = get_fake_models(causal=causal,
                                                    batch_padding=padding, time_alignment=time_align)
    activations = activations_extractor.from_paths(stimuli_paths=stimuli_paths,
                                                   layers=layers)

    assert activations is not None
    assert len(activations['stimulus_path']) == 2
    assert len(np.unique(activations['layer'])) == len(layers)

    # the longest video lasts 6 seconds, so the causal inferencer yields 6 * fps bins
    expected_num_time_bins = 6 * fps
    if causal:
        assert activations.sizes['time_bin'] == expected_num_time_bins

    import gc
    gc.collect()  # free some memory, we're piling up a lot of activations at this point
|
71
|
+
|
72
|
+
|
73
|
+
def _build_stimulus_set(video_names):
    """Assemble a StimulusSet whose stimulus paths point at the module's test videos."""
    rows = [{'stimulus_id': name, 'some_meta': name[::-1]} for name in video_names]
    stimulus_set = StimulusSet(rows)
    stimulus_set.stimulus_paths = dict(zip(video_names, video_paths))
    return stimulus_set
|
79
|
+
|
80
|
+
|
81
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize(["causal", "padding"], [(False, True), (True, False)])
def test_from_stimulus_set(causal, padding):
    """Extraction via a StimulusSet must keep stimulus ids and metadata attached."""
    names = ["dots1.mp4", "dots2.mp4"]
    stimulus_set = _build_stimulus_set(names)

    extractor, layers = get_fake_models(causal=causal, batch_padding=padding)
    activations = extractor(stimulus_set, layers=layers)

    assert activations is not None
    assert set(activations['stimulus_id'].values) == set(names)
    reversed_names = [name[::-1] for name in names]
    assert all(activations['some_meta'].values == reversed_names)
    assert len(np.unique(activations['layer'])) == len(layers)

    import gc
    gc.collect()  # free some memory, we're piling up a lot of activations at this point
|
@@ -0,0 +1,189 @@
|
|
1
|
+
import numpy as np
|
2
|
+
import os
|
3
|
+
import pytest
|
4
|
+
|
5
|
+
from collections import OrderedDict
|
6
|
+
|
7
|
+
from brainscore_vision.model_helpers.activations.temporal.core import Inferencer, TemporalInferencer, CausalInferencer, BlockInferencer
|
8
|
+
from brainscore_vision.model_helpers.activations.temporal.inputs import Video, Stimulus
|
9
|
+
|
10
|
+
|
11
|
+
"""This module tests model_helpers.activations.temporal.core.inferencer
|
12
|
+
|
13
|
+
Different inferencers are tested:
|
14
|
+
- Inferencer: the basic inferencer that does not enforce any temporal context
|
15
|
+
- TemporalInferencer: the inferencer that aligns the activations to the video time
|
16
|
+
- CausalInferencer: the inferencer that ensures the activations are causal
|
17
|
+
- BlockInferencer: the inferencer that divides the video into blocks and infer the activations for each block
|
18
|
+
"""
|
19
|
+
|
20
|
+
|
21
|
+
video_paths = [
|
22
|
+
os.path.join(os.path.dirname(__file__), "..", "dots1.mp4"),
|
23
|
+
os.path.join(os.path.dirname(__file__), "..", "dots2.mp4"),
|
24
|
+
]
|
25
|
+
video_durations = [2000, 6000]
|
26
|
+
img_path = os.path.join(os.path.dirname(__file__), "../../activations/rgb.jpg")
|
27
|
+
|
28
|
+
|
29
|
+
def dummy_get_features(model_inputs, layers):
    """Fake model forward pass: 80x80 average-pool each frame and keep 2 channels.

    Returns an OrderedDict with a "THWC"-shaped "layer1" and a "C"-shaped "layer2".
    The `layers` argument is accepted for interface compatibility but not used.
    """
    stacked = np.stack(model_inputs)
    batch, frames, height, width, channels = stacked.shape
    # BFHWC=B,F,6,3,2 for the standard 480x240 crop
    pooled = stacked.reshape(batch, frames, height // 80, 80, width // 80, 80, channels)
    pooled = pooled.mean(axis=(3, 5))[..., :2]
    return OrderedDict([("layer1", pooled), ("layer2", pooled[:, 0, 0, 0])])
|
38
|
+
|
39
|
+
def dummy_preprocess(video):
    """Crop every frame to rows 200:680 and columns 200:440 (a 480x240 patch)."""
    return video.to_numpy()[:, 200:680, 200:440, :]
|
42
|
+
|
43
|
+
def time_down_sample_preprocess(video):
    """Same spatial crop as dummy_preprocess, but keep only every other frame."""
    return video.to_numpy()[::2, 200:680, 200:440, :]
|
46
|
+
|
47
|
+
# layer name -> activation layout consumed by the inferencers:
# "THWC" = time x height x width x channel; "C" = a per-clip channel vector
dummy_layer_activation_format = {
    "layer1": "THWC",
    "layer2": "C",
}

# the layers requested from the dummy model in every test below
dummy_layers = ["layer1", "layer2"]
|
53
|
+
|
54
|
+
|
55
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize("max_spatial_size", [None, 2, 4])
def test_inferencer(max_spatial_size):
    """Run the basic Inferencer over both test videos and check the flattened neuroid count.

    The dummy model pools each 480x240 crop into a 6x3 spatial grid with 2 channels,
    producing one activation frame per input frame.
    """
    inferencer = Inferencer(dummy_get_features, dummy_preprocess, dummy_layer_activation_format,
                            Video, max_workers=1, max_spatial_size=max_spatial_size, batch_grouper=lambda s: s.duration)
    model_assembly = inferencer(video_paths, layers=dummy_layers)
    if max_spatial_size is None:
        # 6 second video with fps 60 has 360 frames
        # the model simply returns the same number of frames as the temporal size of activations
        # so the number of channel_temporal should be 360;
        # layer1 contributes 360 (time) * 6 * 3 (space) * 2 (channels), layer2 adds its 2 channels
        assert model_assembly.sizes["neuroid"] == 360*6*3*2 + 2
    else:
        # presumably the inferencer resizes the spatial grid to
        # max_spatial_size x (max_spatial_size // 2) — TODO confirm against Inferencer
        assert model_assembly.sizes["neuroid"] == 360*max_spatial_size*(max_spatial_size//2) * 2 + 2
    assert model_assembly.sizes["stimulus_path"] == 2
|
69
|
+
|
70
|
+
|
71
|
+
@pytest.mark.parametrize("time_alignment", ["evenly_spaced", "ignore_time"])
@pytest.mark.parametrize("fps", [10, 30, 45])
def test_temporal_inferencer(time_alignment, fps):
    """TemporalInferencer aligns activations to video time at the requested fps."""
    inferencer = TemporalInferencer(dummy_get_features, dummy_preprocess,
                                    dummy_layer_activation_format, max_workers=1,
                                    fps=fps, time_alignment=time_alignment)
    model_assembly = inferencer(video_paths, layers=dummy_layers)
    assert model_assembly['time_bin_start'].values[0] == 0
    assert model_assembly['time_bin_end'].values[-1] == max(video_durations)

    if time_alignment != "ignore_time":
        # since the longer video lasts for 6 seconds, and the temporal inferencer aligns all output
        # assemblies to the fps specified at construction, the number of time bins should be 6*fps
        assert model_assembly.sizes["time_bin"] == 6 * fps
        assert np.isclose(model_assembly['time_bin_end'].values[0] - model_assembly['time_bin_start'].values[0], 1000/fps)
    else:
        # "ignore_time" collapses everything into one bin spanning the longest video
        assert model_assembly.sizes["time_bin"] == 1
        assert model_assembly['time_bin_end'].values[0] - model_assembly['time_bin_start'].values[0] == max(video_durations)

    # manual computation check: running the dummy model directly on the fps-resampled
    # video must reproduce the assembly's layer1 values
    output_values = model_assembly.sel(stimulus_path=video_paths[1])\
        .isel(neuroid=model_assembly.layer=="layer1")\
        .transpose('time_bin', 'neuroid').values.reshape(-1)

    # removed the dead `manual_compute_values = []` assignment that was immediately overwritten
    video = Video.from_path(video_paths[1]).set_fps(fps)
    manual_compute_values = dummy_get_features([dummy_preprocess(video)], ["layer1"])["layer1"][0].reshape(-1)
    manual_compute_values = manual_compute_values.astype(output_values.dtype)
    assert np.allclose(output_values, manual_compute_values)
|
100
|
+
|
101
|
+
|
102
|
+
@pytest.mark.memory_intense
def test_img_input():
    """An image converted to a 1-second video at `fps` should yield exactly fps time bins."""
    frame_rate = 30
    inferencer = TemporalInferencer(dummy_get_features, dummy_preprocess,
                                    dummy_layer_activation_format, max_workers=1,
                                    fps=frame_rate, convert_img_to_video=True, img_duration=1000)
    model_assembly = inferencer([img_path], layers=dummy_layers)
    assert model_assembly.sizes["time_bin"] == frame_rate
|
110
|
+
|
111
|
+
|
112
|
+
def test_compute_temporal_context():
    """CausalInferencer._compute_temporal_context under the different context strategies."""
    def context(**kwargs):
        return CausalInferencer(None, None, None, fps=10, **kwargs)._compute_temporal_context()

    # greedy takes the full allowed duration; conservative takes the minimum
    assert context(duration=(200, 1000), temporal_context_strategy="greedy") == (200, 1000)
    assert context(duration=(200, 1000), temporal_context_strategy="conservative") == (200, 200)

    # num_frames caps the greedy context: 5 frames at 10 fps = 500 ms
    assert context(duration=(0, 1000), num_frames=(2, 5), temporal_context_strategy="greedy") == (200, 500)
    assert context(duration=(0, 1000), num_frames=(2, 15), temporal_context_strategy="greedy") == (200, 1000)

    # "fix" pins the upper bound to the explicitly given context
    assert context(duration=(0, 1000), num_frames=(2, 15), temporal_context_strategy="fix",
                   fixed_temporal_context=500) == (200, 500)
|
128
|
+
|
129
|
+
|
130
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize("preprocess", ["normal", "downsample"])
@pytest.mark.parametrize("fps", [1, 40])
def test_causal_inferencer(preprocess, fps):
    """CausalInferencer yields one time bin per frame; each bin only sees video up to its end.

    NOTE(review): the original hard-coded `dummy_preprocess` for both the inferencer and the
    manual check, so the "downsample" parametrization was never exercised; the selected
    `preprocess` is now threaded through both consistently (mirroring test_block_inferencer).
    """
    if preprocess == "normal":
        preprocess = dummy_preprocess
    else:
        preprocess = time_down_sample_preprocess
    inferencer = CausalInferencer(dummy_get_features, preprocess,
                                  dummy_layer_activation_format,
                                  fps=fps, max_workers=1)
    model_assembly = inferencer(video_paths, layers=dummy_layers)
    assert model_assembly.sizes["time_bin"] == 6 * fps
    assert np.isclose(model_assembly['time_bin_end'].values[0] - model_assembly['time_bin_start'].values[0], 1000/fps)
    assert inferencer._compute_temporal_context() == (1000/fps, np.inf)

    # manual computation check: for each bin, run the model on the causal clip [0, t]
    # and take the activation of the last frame
    output_values = model_assembly.sel(stimulus_path=video_paths[1])\
        .isel(neuroid=model_assembly.layer=="layer1")\
        .transpose('time_bin', 'neuroid').values
    manual_compute_values = []
    video = Video.from_path(video_paths[1]).set_fps(fps)
    interval = 1000/fps
    for time_end in np.arange(interval, 6000+interval, interval):
        clip = video.set_window(0, time_end)
        manual_compute_values.append(dummy_get_features([preprocess(clip)], ["layer1"])["layer1"][0, -1])
    manual_compute_values = np.stack(manual_compute_values).reshape(len(manual_compute_values), -1).astype(output_values.dtype)
    assert np.allclose(output_values, manual_compute_values)
|
158
|
+
|
159
|
+
|
160
|
+
@pytest.mark.memory_intense
@pytest.mark.parametrize("preprocess", ["normal", "downsample"])
@pytest.mark.parametrize("fps", [1, 40])
def test_block_inferencer(preprocess, fps):
    """BlockInferencer splits each video into fixed-duration blocks and infers per block."""
    if preprocess == "normal":
        preprocessing = dummy_preprocess
    else:
        preprocessing = time_down_sample_preprocess
    inferencer = BlockInferencer(dummy_get_features, preprocessing, dummy_layer_activation_format, fps=fps,
                                 duration=(200, 4000), temporal_context_strategy="greedy", max_workers=1)
    model_assembly = inferencer(video_paths, layers=dummy_layers)
    # the 6s video is covered by two 4s blocks, so the assembly spans 8s worth of bins
    assert model_assembly.sizes["time_bin"] == 8 * fps  # block overflow 2 x 4 seconds
    assert np.isclose(model_assembly['time_bin_end'].values[0] - model_assembly['time_bin_start'].values[0], 1000/fps)

    # manual computation check: run the dummy model on each 4s block and concatenate
    output_values = model_assembly.sel(stimulus_path=video_paths[1])\
        .isel(neuroid=model_assembly.layer=="layer1")\
        .transpose('time_bin', 'neuroid').values
    manual_compute_values = []
    video = Video.from_path(video_paths[1]).set_fps(fps)
    interval = 4000
    for time_end in np.arange(interval, 6000+interval, interval):
        time_start = time_end - interval
        clip = video.set_window(time_start, time_end)
        manual_compute_values.append(dummy_get_features([preprocessing(clip)], ["layer1"])["layer1"][0])
    manual_compute_values = np.concatenate(manual_compute_values)
    manual_compute_values = manual_compute_values.reshape(len(manual_compute_values), -1).astype(output_values.dtype)
    if preprocess == "downsample":
        # downsampling halves the activation frames per block; presumably the assembly
        # still keeps one bin per fps-frame, so only every other bin has a manual
        # counterpart — TODO confirm against BlockInferencer's frame alignment
        output_values = output_values[::2]
    assert np.allclose(output_values, manual_compute_values)
|
@@ -0,0 +1,103 @@
|
|
1
|
+
import os
|
2
|
+
import numpy as np
|
3
|
+
|
4
|
+
from brainscore_vision.model_helpers.activations.temporal.inputs import Video, Image
|
5
|
+
|
6
|
+
|
7
|
+
"""This module tests model_helpers.activations.temporal.inputs
|
8
|
+
|
9
|
+
Different inputs are tested:
|
10
|
+
- Video
|
11
|
+
- Image
|
12
|
+
Specifically, the different transformations (set fps, set size, etc.) of the inputs are tested.
|
13
|
+
"""
|
14
|
+
|
15
|
+
video_paths = [
|
16
|
+
os.path.join(os.path.dirname(__file__), "..", "dots1.mp4"),
|
17
|
+
os.path.join(os.path.dirname(__file__), "..", "dots2.mp4"),
|
18
|
+
]
|
19
|
+
video_durations = [2000, 6000]
|
20
|
+
img_path = os.path.join(os.path.dirname(__file__), "../../activations/rgb.jpg")
|
21
|
+
|
22
|
+
|
23
|
+
def test_video_load_frames():
    """Each frame from to_frames() must equal the last frame of the window ending at its timestamp."""
    video = Video.from_path(video_paths[0])
    frame_interval = 1000 / video.fps
    timestamps = np.arange(frame_interval, video.duration, frame_interval)
    for timestamp, frame in zip(timestamps, video.to_frames()):
        window = video.set_window(0, timestamp)
        assert (window.to_frames()[-1] == frame).all()
|
33
|
+
|
34
|
+
def test_video():
    """Exercise Video transformations: windows (incl. negative/out-of-range), fps changes,
    resizing, copying, and repeat-padding."""
    video1 = Video.from_path(video_paths[0])
    video2 = Video.from_img_path(img_path, 1000, 30)  # 1000ms still-image video at 30 fps

    assert video2.duration == 1000

    # negative windows with repeat padding both resolve to the first frame
    video3 = video1.set_window(-10, 0, padding="repeat")
    video4 = video1.set_window(-20, -10, padding="repeat")
    assert (video3.to_numpy() == video4.to_numpy()).all()
    assert (video3.to_numpy()[0] == video1.to_numpy()[0]).all()

    assert video2.fps == 30
    assert video2.set_fps(1).to_numpy().shape[0] == 1  # 1 fps over 1s -> a single frame

    # set_size takes (width, height); numpy output is (frames, height, width, ...)
    video5 = video1.set_size((120, 100))
    assert tuple(video5.to_numpy().shape[1:3]) == (100, 120)

    # NOTE(review): the checks below imply video1's native fps is 60 — confirm for dots1.mp4
    video6 = video1.set_fps(30)
    assert (video6.to_numpy()[1] == video1.to_numpy()[2]).all()
    assert (video6.to_numpy()[2] == video1.to_numpy()[4]).all()

    video6 = video1.set_fps(20)
    assert (video6.to_numpy()[1] == video1.to_numpy()[3]).all()
    assert (video6.to_numpy()[2] == video1.to_numpy()[6]).all()

    # chained windows compose: offsets are relative to the previous window
    video7 = video1.set_window(-100, 100).set_window(100, 200)
    assert video7.duration == 100
    assert (video7.to_numpy() == video1.set_window(0, 100).to_numpy()).all()

    video8 = video1.set_window(300, 500).set_window(0, 100)
    assert video8.duration == 100
    assert (video8.to_numpy() == video1.set_window(300, 400).to_numpy()).all()

    # test copy: copies are independent and later set_fps wins
    video9 = video1.set_fps(5).copy().set_fps(30).copy()
    assert (video9.to_numpy()[1] == video1.to_numpy()[2]).all()
    assert (video9.to_numpy()[2] == video1.to_numpy()[4]).all()

    # single-frame, prefix, and suffix windows around an exact frame boundary
    for frame in [10, 50, 100]:
        time_start = 1000 / video1.fps * frame
        video10 = video1.set_window(time_start, time_start+1000/video1.fps)
        assert video10.to_numpy().shape[0] == 1
        assert (video10.to_numpy()[0] == video1.to_numpy()[frame]).all()

        video10 = video1.set_window(0, time_start+1000/video1.fps)
        assert video10.to_numpy().shape[0] == frame+1
        assert (video10.to_numpy()[frame] == video1.to_numpy()[frame]).all()

        video10 = video1.set_window(time_start, video1.duration)
        assert video10.to_numpy().shape[0] == video1.to_numpy().shape[0] - frame
        assert (video10.to_numpy()[0] == video1.to_numpy()[frame]).all()

    # frame count after resampling rounds up: ceil(duration * fps / 1000)
    for fps in [7.5, 9, 1, 43, 1000/video1.duration, 1001/video1.duration]:
        video11 = video1.set_fps(fps)
        assert video11.to_numpy().shape[0] == np.ceil(video1.duration * fps / 1000)

    # repeat padding keeps the frame count fixed regardless of where the window lands
    for v in [video1, video2]:
        target_num_frames = 7
        duration = 1000 / v.fps * target_num_frames
        common = list(np.arange(0, v.duration, 100))
        extra1 = 1000 / v.fps * 3 + v.duration
        extra2 = 1000 / v.fps * 2 + v.duration
        extra3 = 1000 / v.fps * 1
        for t in [extra1, extra2, extra3] + common:
            video = v.set_window(t-duration, t, padding="repeat")
            assert video.to_numpy().shape[0] == target_num_frames
|
100
|
+
|
101
|
+
def test_image():
    """set_size takes (width, height) while the numpy array is (height, width, ...)."""
    image = Image.from_path(img_path)
    resized = image.set_size((10, 12))
    assert resized.to_numpy().shape[:2] == (12, 10)
|
File without changes
|
@@ -0,0 +1,122 @@
|
|
1
|
+
from brainscore_vision.model_helpers.brain_transformation.temporal import time_align, assembly_time_align
|
2
|
+
import numpy as np
|
3
|
+
|
4
|
+
# imports
|
5
|
+
import numpy as np
|
6
|
+
from brainio.assemblies import DataAssembly
|
7
|
+
|
8
|
+
|
9
|
+
"""
|
10
|
+
This module tests the time alignment functionalities:
|
11
|
+
ie., given a set of target time bins, align the neural assembly with a set of source time bins to them.
|
12
|
+
"""
|
13
|
+
|
14
|
+
|
15
|
+
# dimensions of the synthetic dataset:
# T = number of time bins, P = number of presentations/stimuli, N = number of neuroids
T = 3
P = 6
N = 4
time_bins = np.array([(0, 10), (10, 20), (20, 30)])
stimulus_id = ["A", "B", "C", "D", "E", "F"]
neuroid_id = ["n1", "n2", "n3", "n4"]

# latent variable that generates both "video" and "neural signals"
latent1 = [1, 3, 2]
latent2 = [2, 3, 1]
latent3 = [1, 2, 3]
latent4 = [3, 2, 1]
latent5 = [3, 1, 2]
latent6 = [2, 1, 3]
latent = np.array([latent1, latent2, latent3, latent4, latent5, latent6])  # shape (P, T)

# videos: P clips of T 8x8 RGB frames whose brightness follows the latent
# NOTE(review): `videos` and `video_paths` are built but not referenced by the tests below
from PIL import Image
vs = np.random.rand(P, T, 8, 8, 3) + latent[..., None, None, None]  # 0~4
vs = (vs * 255//4).astype(np.uint8)
videos = [[Image.fromarray(img) for img in video] for video in vs]
video_paths = [f"video{i}.mp4" for i in range(P)]

# neural signals at time 1 2 3: each neuroid scales the latent by a different gain
n1 = latent * 2
n2 = latent * 1
n3 = latent * .5
n4 = latent * .1
perfect_response = np.array([n1, n2, n3, n4])  # shape (N, P, T)
noisy_response = perfect_response + np.random.randn(N, P, T) * 0.1
|
45
|
+
|
46
|
+
# assemblies
|
47
|
+
def _make_neural_assembly(data):
    """Wrap an (N, P, T) response array into a DataAssembly with the module's coordinates."""
    coords = {
        "neuroid_id": ("neuroid", neuroid_id),
        "stimulus_id": ("presentation", stimulus_id),
        "time_bin_start": ("time_bin", time_bins[:, 0]),
        "time_bin_end": ("time_bin", time_bins[:, 1]),
    }
    return DataAssembly(data, dims=["neuroid", "presentation", "time_bin"], coords=coords)

# the shared assembly the alignment tests below operate on
assembly = _make_neural_assembly(noisy_response)
|
61
|
+
|
62
|
+
def _except(func, *args, **kwargs):
|
63
|
+
try:
|
64
|
+
func(*args, **kwargs)
|
65
|
+
except:
|
66
|
+
return
|
67
|
+
else:
|
68
|
+
raise False
|
69
|
+
|
70
|
+
|
71
|
+
def test_time_align():
    """time_align maps source time bins onto target bins by bin center or by overlap portion."""
    # case 1: coarse 100ms source bins, narrow target bins at both ends
    source_bins = [(0, 100), (100, 200), (200, 300)]
    target_bins = [(0, 50), (250, 300)]

    expected_center = np.array([[1, 0, 0], [0, 0, 1]])
    assert (time_align(source_bins, target_bins, mode="center") == expected_center).all()

    expected_portion = np.array([[0.5, 0, 0], [0, 0, 0.5]])
    assert (time_align(source_bins, target_bins, mode="portion") == expected_portion).all()

    # case 2: 10ms target bins against ~33ms source bins (30 fps)
    target_starts = np.arange(0, 100, 10)
    target_bins = np.stack([target_starts, target_starts + 10], axis=-1)
    source_starts = np.arange(0, 100, 1000/30)
    source_bins = np.stack([source_starts, source_starts + 1000/30], axis=-1)
    expected_assignment = np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2])
    assert (time_align(source_bins, target_bins).argmax(1) == expected_assignment).all()
|
92
|
+
|
93
|
+
|
94
|
+
def test_assembly_time_align():
    """assembly_time_align re-bins the module-level assembly onto requested time bins."""
    # exact re-ordering of existing source bins (default mode)
    ret = assembly_time_align(assembly, [(0, 10), (20, 30), (10, 20)])
    assert (assembly.isel(time_bin=[0, 2, 1]) == ret.isel(time_bin=[0, 1, 2])).all()
    # bins outside the source range, or reversed bins, must raise
    _except(assembly_time_align, assembly, [(0, 10), (20, 30), (40, 50)])
    _except(assembly_time_align, assembly, [(30, 20)])
    ret = assembly_time_align(assembly, [(20, 30), (0, 10), (10, 20)])
    assert (assembly.isel(time_bin=[2, 0, 1]) == ret.isel(time_bin=[0, 1, 2])).all()

    # "portion" mode: overlapping source bins are weighted by their overlap fraction;
    # (0, 30) spans all three bins equally, i.e. the mean over time
    ret = assembly_time_align(assembly, [(0, 30), (0, 10), (10, 20)], mode="portion")
    assert (assembly.isel(time_bin=[1, 2]) == ret.isel(time_bin=[2, 1])).all()
    assert (ret.isel(time_bin=0).values == assembly.mean("time_bin").values).all()

    # (5, 25) covers half of bin 0, all of bin 1, and half of bin 2
    ret = assembly_time_align(assembly, [(5, 25)], mode="portion")
    val = (assembly.isel(time_bin=0).values * 0.5 + assembly.isel(time_bin=1).values + assembly.isel(time_bin=2).values * 0.5) / 2
    assert np.isclose(ret.isel(time_bin=0).values, val).all()

    # (9, 21): 10% of bins 0 and 2, all of bin 1, normalized by total coverage 1.2
    ret = assembly_time_align(assembly, [(9, 21)], mode="portion")
    val = (assembly.isel(time_bin=0).values * .1 + assembly.isel(time_bin=1).values + assembly.isel(time_bin=2).values * .1) / 1.2
    assert np.isclose(ret.isel(time_bin=0).values, val).all()

    # (9, 13): 1ms of bin 0 and 3ms of bin 1 -> 25% / 75% weights
    ret = assembly_time_align(assembly, [(9, 13)], mode="portion")
    val = (assembly.isel(time_bin=0).values * 0.25 + assembly.isel(time_bin=1).values * 0.75)
    assert np.isclose(ret.isel(time_bin=0).values, val).all()

    # requested bins must stay within the source's (0, 30) span, even marginally
    _except(assembly_time_align, assembly, [(20, 35)], mode="portion")
    _except(assembly_time_align, assembly, [(0, 31)], mode="portion")
    _except(assembly_time_align, assembly, [(0, 30.00001)], mode="portion")

    # for a bin identical to a source bin, both modes agree
    assert (assembly_time_align(assembly, [(0, 10)], mode="portion").values == assembly_time_align(assembly, [(0, 10)], mode="center").values).all()
|
@@ -0,0 +1,61 @@
|
|
1
|
+
import numpy as np
|
2
|
+
from brainscore_vision.model_helpers.activations.temporal.utils import (
|
3
|
+
batch_2d_resize
|
4
|
+
)
|
5
|
+
|
6
|
+
|
7
|
+
# this tests whether the batch_2d_resize with "pool" mode works as expected (ie, average over regions)
|
8
|
+
# this tests whether the batch_2d_resize with "pool" mode works as expected (ie, average over regions)
def test_proportional_average_pooling():
    """batch_2d_resize(mode="pool") must average source cells proportionally to their overlap."""
    source = np.array([
        [1, 2],
        [3, 4],
        [5, 6],
    ], dtype=float)[..., None]

    def pool(image, size):
        # batch_2d_resize operates on a batch; wrap/unwrap a single image
        return batch_2d_resize(image[None, :], size, "pool")[0]

    # each case: (target (width, height), expected pooled output)
    cases = [
        ((2, 1), np.array([
            [3, 4],
        ])[..., None]),
        ((2, 2), np.array([
            [1*2/3 + 3*1/3, 2*2/3 + 4*1/3],
            [3*1/3 + 5*2/3, 4*1/3 + 6*2/3],
        ])[..., None]),
        ((1, 2), np.array([
            [(1*2/3 + 3*1/3 + 2*2/3 + 4*1/3) / 2],
            [(3*1/3 + 5*2/3 + 4*1/3 + 6*2/3) / 2],
        ])[..., None]),
        # upsampling rows: boundary rows pass through, middle rows interpolate
        ((3, 3), np.array([
            [1, 1.5, 2],
            [3, 3.5, 4],
            [5, 5.5, 6],
        ])[..., None]),
        # full collapse: global mean per channel
        ((1, 1), source.reshape(-1, 1).mean(0)[None, None, :]),
        ((2, 4), np.array([
            [1, 2],
            [(1*1 + 3*2) / 3, (2*1 + 4*2) / 3],
            [(3*2 + 5*1) / 3, (4*2 + 6*1) / 3],
            [5, 6],
        ], dtype=float)[..., None]),
    ]
    for size, expected in cases:
        assert np.allclose(pool(source, size), expected)
|