brainscore_vision-2.1-py3-none-any.whl
- brainscore_vision/__init__.py +105 -0
- brainscore_vision/__main__.py +20 -0
- brainscore_vision/benchmark_helpers/__init__.py +67 -0
- brainscore_vision/benchmark_helpers/neural_common.py +70 -0
- brainscore_vision/benchmark_helpers/properties_common.py +424 -0
- brainscore_vision/benchmark_helpers/screen.py +126 -0
- brainscore_vision/benchmark_helpers/test_helper.py +160 -0
- brainscore_vision/benchmarks/README.md +7 -0
- brainscore_vision/benchmarks/__init__.py +122 -0
- brainscore_vision/benchmarks/baker2022/__init__.py +9 -0
- brainscore_vision/benchmarks/baker2022/benchmark.py +125 -0
- brainscore_vision/benchmarks/baker2022/requirements.txt +1 -0
- brainscore_vision/benchmarks/baker2022/test.py +90 -0
- brainscore_vision/benchmarks/bmd2024/__init__.py +8 -0
- brainscore_vision/benchmarks/bmd2024/benchmark.py +51 -0
- brainscore_vision/benchmarks/bmd2024/test.py +29 -0
- brainscore_vision/benchmarks/bracci2019/__init__.py +8 -0
- brainscore_vision/benchmarks/bracci2019/benchmark.py +286 -0
- brainscore_vision/benchmarks/bracci2019/requirements.txt +3 -0
- brainscore_vision/benchmarks/cadena2017/__init__.py +5 -0
- brainscore_vision/benchmarks/cadena2017/benchmark.py +91 -0
- brainscore_vision/benchmarks/cadena2017/test.py +35 -0
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +8 -0
- brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +133 -0
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +21 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +15 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py +201 -0
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/benchmarks/ferguson2024/__init__.py +24 -0
- brainscore_vision/benchmarks/ferguson2024/benchmark.py +210 -0
- brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py +251 -0
- brainscore_vision/benchmarks/ferguson2024/requirements.txt +5 -0
- brainscore_vision/benchmarks/ferguson2024/test.py +114 -0
- brainscore_vision/benchmarks/freemanziemba2013/__init__.py +10 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/benchmark.py +53 -0
- brainscore_vision/benchmarks/freemanziemba2013/benchmarks/public_benchmarks.py +37 -0
- brainscore_vision/benchmarks/freemanziemba2013/test.py +98 -0
- brainscore_vision/benchmarks/geirhos2021/__init__.py +59 -0
- brainscore_vision/benchmarks/geirhos2021/benchmark.py +132 -0
- brainscore_vision/benchmarks/geirhos2021/test.py +189 -0
- brainscore_vision/benchmarks/hebart2023/__init__.py +4 -0
- brainscore_vision/benchmarks/hebart2023/benchmark.py +72 -0
- brainscore_vision/benchmarks/hebart2023/test.py +19 -0
- brainscore_vision/benchmarks/hermann2020/__init__.py +6 -0
- brainscore_vision/benchmarks/hermann2020/benchmark.py +63 -0
- brainscore_vision/benchmarks/hermann2020/test.py +28 -0
- brainscore_vision/benchmarks/igustibagus2024/__init__.py +11 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +306 -0
- brainscore_vision/benchmarks/igustibagus2024/domain_transfer_neural.py +134 -0
- brainscore_vision/benchmarks/igustibagus2024/test.py +45 -0
- brainscore_vision/benchmarks/imagenet/__init__.py +4 -0
- brainscore_vision/benchmarks/imagenet/benchmark.py +50 -0
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50001 -0
- brainscore_vision/benchmarks/imagenet/test.py +32 -0
- brainscore_vision/benchmarks/imagenet_c/__init__.py +7 -0
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +204 -0
- brainscore_vision/benchmarks/imagenet_c/test.py +57 -0
- brainscore_vision/benchmarks/islam2021/__init__.py +11 -0
- brainscore_vision/benchmarks/islam2021/benchmark.py +107 -0
- brainscore_vision/benchmarks/islam2021/test.py +47 -0
- brainscore_vision/benchmarks/kar2019/__init__.py +4 -0
- brainscore_vision/benchmarks/kar2019/benchmark.py +88 -0
- brainscore_vision/benchmarks/kar2019/test.py +93 -0
- brainscore_vision/benchmarks/majajhong2015/__init__.py +18 -0
- brainscore_vision/benchmarks/majajhong2015/benchmark.py +96 -0
- brainscore_vision/benchmarks/majajhong2015/test.py +103 -0
- brainscore_vision/benchmarks/malania2007/__init__.py +13 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +235 -0
- brainscore_vision/benchmarks/malania2007/test.py +64 -0
- brainscore_vision/benchmarks/maniquet2024/__init__.py +6 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +199 -0
- brainscore_vision/benchmarks/maniquet2024/test.py +17 -0
- brainscore_vision/benchmarks/marques2020/__init__.py +76 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/cavanaugh2002a_benchmark.py +119 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982a_benchmark.py +84 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/devalois1982b_benchmark.py +88 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/freemanZiemba2013_benchmark.py +138 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/ringach2002_benchmark.py +167 -0
- brainscore_vision/benchmarks/marques2020/benchmarks/schiller1976_benchmark.py +100 -0
- brainscore_vision/benchmarks/marques2020/test.py +135 -0
- brainscore_vision/benchmarks/objectnet/__init__.py +4 -0
- brainscore_vision/benchmarks/objectnet/benchmark.py +52 -0
- brainscore_vision/benchmarks/objectnet/test.py +33 -0
- brainscore_vision/benchmarks/rajalingham2018/__init__.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/benchmark.py +74 -0
- brainscore_vision/benchmarks/rajalingham2018/benchmarks/public_benchmark.py +10 -0
- brainscore_vision/benchmarks/rajalingham2018/test.py +125 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=alexnet,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet18,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/identifier=resnet34,stimuli_identifier=objectome-240.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2018/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/benchmarks/rajalingham2020/__init__.py +4 -0
- brainscore_vision/benchmarks/rajalingham2020/benchmark.py +52 -0
- brainscore_vision/benchmarks/rajalingham2020/test.py +39 -0
- brainscore_vision/benchmarks/sanghavi2020/__init__.py +17 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavi2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavijozwik2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/benchmarks/sanghavimurty2020_benchmark.py +44 -0
- brainscore_vision/benchmarks/sanghavi2020/test.py +83 -0
- brainscore_vision/benchmarks/scialom2024/__init__.py +52 -0
- brainscore_vision/benchmarks/scialom2024/benchmark.py +97 -0
- brainscore_vision/benchmarks/scialom2024/test.py +162 -0
- brainscore_vision/data/__init__.py +0 -0
- brainscore_vision/data/baker2022/__init__.py +40 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py +43 -0
- brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py +81 -0
- brainscore_vision/data/baker2022/data_packaging/mapping.py +60 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py +46 -0
- brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py +94 -0
- brainscore_vision/data/baker2022/test.py +135 -0
- brainscore_vision/data/barbumayo2019/BarbuMayo2019.py +26 -0
- brainscore_vision/data/barbumayo2019/__init__.py +23 -0
- brainscore_vision/data/barbumayo2019/test.py +10 -0
- brainscore_vision/data/bashivankar2019/__init__.py +52 -0
- brainscore_vision/data/bashivankar2019/data_packaging/2020-08-17_npc_v4_data.h5.png +0 -0
- brainscore_vision/data/bashivankar2019/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/bashivankar2019/data_packaging/synthetic.py +162 -0
- brainscore_vision/data/bashivankar2019/test.py +15 -0
- brainscore_vision/data/bmd2024/__init__.py +69 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py +91 -0
- brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py +48 -0
- brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv +401 -0
- brainscore_vision/data/bmd2024/test.py +130 -0
- brainscore_vision/data/bracci2019/__init__.py +36 -0
- brainscore_vision/data/bracci2019/data_packaging.py +221 -0
- brainscore_vision/data/bracci2019/test.py +16 -0
- brainscore_vision/data/cadena2017/__init__.py +52 -0
- brainscore_vision/data/cadena2017/data_packaging/2018-08-07_tolias_v1.ipynb +25880 -0
- brainscore_vision/data/cadena2017/data_packaging/analysis.py +26 -0
- brainscore_vision/data/cadena2017/test.py +24 -0
- brainscore_vision/data/cichy2019/__init__.py +38 -0
- brainscore_vision/data/cichy2019/test.py +8 -0
- brainscore_vision/data/coggan2024_behavior/__init__.py +36 -0
- brainscore_vision/data/coggan2024_behavior/data_packaging.py +166 -0
- brainscore_vision/data/coggan2024_behavior/test.py +32 -0
- brainscore_vision/data/coggan2024_fMRI/__init__.py +27 -0
- brainscore_vision/data/coggan2024_fMRI/data_packaging.py +123 -0
- brainscore_vision/data/coggan2024_fMRI/test.py +25 -0
- brainscore_vision/data/david2004/__init__.py +34 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-10_gallant_data.ipynb +3647 -0
- brainscore_vision/data/david2004/data_packaging/2018-05-23_gallant_data.ipynb +3149 -0
- brainscore_vision/data/david2004/data_packaging/2018-06-05_gallant_data.ipynb +3628 -0
- brainscore_vision/data/david2004/data_packaging/__init__.py +61 -0
- brainscore_vision/data/david2004/data_packaging/convertGallant.m +100 -0
- brainscore_vision/data/david2004/data_packaging/convertGallantV1Aligned.m +58 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m +484 -0
- brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c +895 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.m +107 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.mexw64 +0 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m +91 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m +307 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/license.txt +24 -0
- brainscore_vision/data/david2004/data_packaging/lib/GetMD5/uTest_GetMD5.m +290 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m +472 -0
- brainscore_vision/data/david2004/data_packaging/lib/glob/license.txt +27 -0
- brainscore_vision/data/david2004/data_packaging/xr_align_debug.py +137 -0
- brainscore_vision/data/david2004/test.py +8 -0
- brainscore_vision/data/deng2009/__init__.py +22 -0
- brainscore_vision/data/deng2009/deng2009imagenet.py +33 -0
- brainscore_vision/data/deng2009/test.py +9 -0
- brainscore_vision/data/ferguson2024/__init__.py +401 -0
- brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py +164 -0
- brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py +20 -0
- brainscore_vision/data/ferguson2024/requirements.txt +2 -0
- brainscore_vision/data/ferguson2024/test.py +155 -0
- brainscore_vision/data/freemanziemba2013/__init__.py +133 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2018-10-05_movshon.ipynb +2002 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-21_movshon_aperture.ipynb +4730 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/2020-02-26_movshon_aperture_test.ipynb +2228 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/aperture_correct.py +160 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/data_packaging.py +57 -0
- brainscore_vision/data/freemanziemba2013/data_packaging/movshon.py +202 -0
- brainscore_vision/data/freemanziemba2013/test.py +97 -0
- brainscore_vision/data/geirhos2021/__init__.py +358 -0
- brainscore_vision/data/geirhos2021/creating_geirhos_ids.ipynb +468 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/colour/colour_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/contrast/contrast_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/cue-conflict/cue-conflict_stimulus_set.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_data_assembly.py +96 -0
- brainscore_vision/data/geirhos2021/data_packaging/edge/edge_stimulus_set.py +69 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonI/eidolonI_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonII/eidolonII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_data_assembly.py +92 -0
- brainscore_vision/data/geirhos2021/data_packaging/eidolonIII/eidolonIII_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_data_assembly.py +83 -0
- brainscore_vision/data/geirhos2021/data_packaging/false-colour/false-colour_stimulus_set.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_stimulus_set.py +81 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +84 -0
- brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/power-equalisation/power-equalisation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/rotation/rotation_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_data_assembly.py +100 -0
- brainscore_vision/data/geirhos2021/data_packaging/silhouette/silhouette_stimulus_set.py +71 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_data_assembly.py +88 -0
- brainscore_vision/data/geirhos2021/data_packaging/sketch/sketch_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_data_assembly.py +87 -0
- brainscore_vision/data/geirhos2021/data_packaging/stylized/stylized_stimulus_set.py +75 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_data_assembly.py +86 -0
- brainscore_vision/data/geirhos2021/data_packaging/uniform-noise/uniform-noise_stimulus_set.py +82 -0
- brainscore_vision/data/geirhos2021/geirhos_hashes.csv +52 -0
- brainscore_vision/data/geirhos2021/test.py +330 -0
- brainscore_vision/data/hebart2023/__init__.py +23 -0
- brainscore_vision/data/hebart2023/packaging/data_assembly.py +40 -0
- brainscore_vision/data/hebart2023/packaging/stimulus_set.py +72 -0
- brainscore_vision/data/hebart2023/test.py +42 -0
- brainscore_vision/data/hendrycks2019/__init__.py +45 -0
- brainscore_vision/data/hendrycks2019/test.py +26 -0
- brainscore_vision/data/igustibagus2024/__init__.py +23 -0
- brainscore_vision/data/igustibagus2024/dependencies/data_pico/stimulus_dicarlo_domain_transfer.csv +3139 -0
- brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb +346 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/__init__.py +0 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb +649 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py +152 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/create_stimulus_set_with_background-id.py +45 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py +849 -0
- brainscore_vision/data/igustibagus2024/merged_assembly/merged_stimulus_set.csv +3139 -0
- brainscore_vision/data/igustibagus2024/oleo_pico_exploration.ipynb +410 -0
- brainscore_vision/data/igustibagus2024/test.py +26 -0
- brainscore_vision/data/imagenetslim15000/ImageNetSlim15000.py +30 -0
- brainscore_vision/data/imagenetslim15000/__init__.py +11 -0
- brainscore_vision/data/imagenetslim15000/test.py +8 -0
- brainscore_vision/data/islam2021/__init__.py +18 -0
- brainscore_vision/data/islam2021/data_packaging.py +64 -0
- brainscore_vision/data/islam2021/test.py +11 -0
- brainscore_vision/data/kar2018/__init__.py +58 -0
- brainscore_vision/data/kar2018/data_packaging/kar_coco.py +97 -0
- brainscore_vision/data/kar2018/data_packaging/kar_hvm.py +77 -0
- brainscore_vision/data/kar2018/data_packaging/requirements.txt +1 -0
- brainscore_vision/data/kar2018/test.py +10 -0
- brainscore_vision/data/kar2019/__init__.py +43 -0
- brainscore_vision/data/kar2019/data_packaging.py +116 -0
- brainscore_vision/data/kar2019/test.py +8 -0
- brainscore_vision/data/kuzovkin2018/__init__.py +36 -0
- brainscore_vision/data/kuzovkin2018/createAssembliesBrainScore.py +103 -0
- brainscore_vision/data/kuzovkin2018/test.py +8 -0
- brainscore_vision/data/majajhong2015/__init__.py +113 -0
- brainscore_vision/data/majajhong2015/data_packaging/darren10ms.py +32 -0
- brainscore_vision/data/majajhong2015/data_packaging/data_packaging.py +65 -0
- brainscore_vision/data/majajhong2015/test.py +38 -0
- brainscore_vision/data/malania2007/__init__.py +254 -0
- brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py +79 -0
- brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py +79 -0
- brainscore_vision/data/malania2007/test.py +147 -0
- brainscore_vision/data/maniquet2024/__init__.py +57 -0
- brainscore_vision/data/maniquet2024/data_packaging.py +151 -0
- brainscore_vision/data/maniquet2024/test.py +16 -0
- brainscore_vision/data/marques2020/__init__.py +123 -0
- brainscore_vision/data/marques2020/data_packaging/marques_cavanaugh2002a.py +84 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982a.py +44 -0
- brainscore_vision/data/marques2020/data_packaging/marques_devalois1982b.py +54 -0
- brainscore_vision/data/marques2020/data_packaging/marques_freemanZiemba2013.py +252 -0
- brainscore_vision/data/marques2020/data_packaging/marques_gen_stim.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_ringach2002.py +95 -0
- brainscore_vision/data/marques2020/data_packaging/marques_schiller1976c.py +60 -0
- brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py +389 -0
- brainscore_vision/data/marques2020/data_packaging/marques_utils.py +21 -0
- brainscore_vision/data/marques2020/data_packaging/setup.py +13 -0
- brainscore_vision/data/marques2020/test.py +54 -0
- brainscore_vision/data/rajalingham2018/__init__.py +56 -0
- brainscore_vision/data/rajalingham2018/rajalingham2018objectome.py +193 -0
- brainscore_vision/data/rajalingham2018/test.py +10 -0
- brainscore_vision/data/rajalingham2020/__init__.py +39 -0
- brainscore_vision/data/rajalingham2020/rajalingham2020orthographic_IT.py +97 -0
- brainscore_vision/data/rajalingham2020/test.py +8 -0
- brainscore_vision/data/rust2012/2020-12-28_rust.ipynb +3301 -0
- brainscore_vision/data/rust2012/__init__.py +45 -0
- brainscore_vision/data/rust2012/rust305.py +35 -0
- brainscore_vision/data/rust2012/test.py +47 -0
- brainscore_vision/data/sanghavi2020/__init__.py +119 -0
- brainscore_vision/data/sanghavi2020/data_packaging/environment.yml +36 -0
- brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +4 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavi2020.py +101 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavijozwik2020.py +148 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavikar2020.py +131 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020.py +120 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things.py +138 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things1.py +118 -0
- brainscore_vision/data/sanghavi2020/data_packaging/sanghavimurty2020things2.py +118 -0
- brainscore_vision/data/sanghavi2020/test.py +13 -0
- brainscore_vision/data/scialom2024/__init__.py +386 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_data_assembly.py +164 -0
- brainscore_vision/data/scialom2024/data_packaging/scialom_stimulus_set.py +117 -0
- brainscore_vision/data/scialom2024/test.py +301 -0
- brainscore_vision/data/seibert2019/__init__.py +25 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-10-13_juvenile.ipynb +35703 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-18_juvenile_scratch.txt +556 -0
- brainscore_vision/data/seibert2019/data_packaging/2020-11-22_juvenile_dldata.ipynb +3614 -0
- brainscore_vision/data/seibert2019/data_packaging/juvenile.py +103 -0
- brainscore_vision/data/seibert2019/test.py +35 -0
- brainscore_vision/data/zhang2018/__init__.py +38 -0
- brainscore_vision/data/zhang2018/test.py +29 -0
- brainscore_vision/data_helpers/__init__.py +0 -0
- brainscore_vision/data_helpers/lookup_legacy.py +15 -0
- brainscore_vision/data_helpers/s3.py +79 -0
- brainscore_vision/metric_helpers/__init__.py +5 -0
- brainscore_vision/metric_helpers/temporal.py +119 -0
- brainscore_vision/metric_helpers/transformations.py +379 -0
- brainscore_vision/metric_helpers/utils.py +71 -0
- brainscore_vision/metric_helpers/xarray_utils.py +151 -0
- brainscore_vision/metrics/__init__.py +7 -0
- brainscore_vision/metrics/accuracy/__init__.py +4 -0
- brainscore_vision/metrics/accuracy/metric.py +16 -0
- brainscore_vision/metrics/accuracy/test.py +11 -0
- brainscore_vision/metrics/accuracy_distance/__init__.py +4 -0
- brainscore_vision/metrics/accuracy_distance/metric.py +109 -0
- brainscore_vision/metrics/accuracy_distance/test.py +57 -0
- brainscore_vision/metrics/baker_accuracy_delta/__init__.py +4 -0
- brainscore_vision/metrics/baker_accuracy_delta/metric.py +94 -0
- brainscore_vision/metrics/baker_accuracy_delta/requirements.txt +1 -0
- brainscore_vision/metrics/baker_accuracy_delta/test.py +1 -0
- brainscore_vision/metrics/cka/__init__.py +14 -0
- brainscore_vision/metrics/cka/metric.py +105 -0
- brainscore_vision/metrics/cka/test.py +28 -0
- brainscore_vision/metrics/dimensionality/__init__.py +13 -0
- brainscore_vision/metrics/dimensionality/metric.py +45 -0
- brainscore_vision/metrics/distribution_similarity/__init__.py +14 -0
- brainscore_vision/metrics/distribution_similarity/metric.py +84 -0
- brainscore_vision/metrics/distribution_similarity/test.py +10 -0
- brainscore_vision/metrics/error_consistency/__init__.py +13 -0
- brainscore_vision/metrics/error_consistency/metric.py +93 -0
- brainscore_vision/metrics/error_consistency/test.py +39 -0
- brainscore_vision/metrics/i1i2/__init__.py +16 -0
- brainscore_vision/metrics/i1i2/metric.py +299 -0
- brainscore_vision/metrics/i1i2/requirements.txt +2 -0
- brainscore_vision/metrics/i1i2/test.py +36 -0
- brainscore_vision/metrics/i1i2/test_resources/alexnet-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet18-probabilities.nc +0 -0
- brainscore_vision/metrics/i1i2/test_resources/resnet34-probabilities.nc +0 -0
- brainscore_vision/metrics/internal_consistency/__init__.py +8 -0
- brainscore_vision/metrics/internal_consistency/ceiling.py +127 -0
- brainscore_vision/metrics/internal_consistency/requirements.txt +1 -0
- brainscore_vision/metrics/internal_consistency/test.py +39 -0
- brainscore_vision/metrics/maniquet2024_metrics/__init__.py +19 -0
- brainscore_vision/metrics/maniquet2024_metrics/metric.py +416 -0
- brainscore_vision/metrics/maniquet2024_metrics/test.py +8 -0
- brainscore_vision/metrics/mask_regression/__init__.py +16 -0
- brainscore_vision/metrics/mask_regression/metric.py +242 -0
- brainscore_vision/metrics/mask_regression/requirements.txt +1 -0
- brainscore_vision/metrics/mask_regression/test.py +0 -0
- brainscore_vision/metrics/ost/__init__.py +23 -0
- brainscore_vision/metrics/ost/metric.py +350 -0
- brainscore_vision/metrics/ost/requirements.txt +2 -0
- brainscore_vision/metrics/ost/test.py +0 -0
- brainscore_vision/metrics/rdm/__init__.py +14 -0
- brainscore_vision/metrics/rdm/metric.py +101 -0
- brainscore_vision/metrics/rdm/requirements.txt +2 -0
- brainscore_vision/metrics/rdm/test.py +63 -0
- brainscore_vision/metrics/regression_correlation/__init__.py +48 -0
- brainscore_vision/metrics/regression_correlation/mask_regression.py +232 -0
- brainscore_vision/metrics/regression_correlation/metric.py +125 -0
- brainscore_vision/metrics/regression_correlation/requirements.txt +3 -0
- brainscore_vision/metrics/regression_correlation/test.py +36 -0
- brainscore_vision/metrics/threshold/__init__.py +5 -0
- brainscore_vision/metrics/threshold/metric.py +481 -0
- brainscore_vision/metrics/threshold/test.py +71 -0
- brainscore_vision/metrics/value_delta/__init__.py +4 -0
- brainscore_vision/metrics/value_delta/metric.py +30 -0
- brainscore_vision/metrics/value_delta/requirements.txt +1 -0
- brainscore_vision/metrics/value_delta/test.py +40 -0
- brainscore_vision/model_helpers/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/__init__.py +1 -0
- brainscore_vision/model_helpers/activations/core.py +635 -0
- brainscore_vision/model_helpers/activations/pca.py +117 -0
- brainscore_vision/model_helpers/activations/pytorch.py +152 -0
- brainscore_vision/model_helpers/activations/temporal/__init__.py +0 -0
- brainscore_vision/model_helpers/activations/temporal/core/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/core/executor.py +219 -0
- brainscore_vision/model_helpers/activations/temporal/core/extractor.py +282 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/base.py +274 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/base.py +134 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/base.py +99 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/block.py +77 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/temporal_context/causal.py +86 -0
- brainscore_vision/model_helpers/activations/temporal/core/inferencer/video/time_aligner.py +73 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/__init__.py +3 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/base.py +17 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/image.py +50 -0
- brainscore_vision/model_helpers/activations/temporal/inputs/video.py +186 -0
- brainscore_vision/model_helpers/activations/temporal/model/__init__.py +2 -0
- brainscore_vision/model_helpers/activations/temporal/model/base.py +33 -0
- brainscore_vision/model_helpers/activations/temporal/model/pytorch.py +107 -0
- brainscore_vision/model_helpers/activations/temporal/utils.py +228 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +97 -0
- brainscore_vision/model_helpers/brain_transformation/behavior.py +348 -0
- brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt +1000 -0
- brainscore_vision/model_helpers/brain_transformation/neural.py +159 -0
- brainscore_vision/model_helpers/brain_transformation/temporal.py +199 -0
- brainscore_vision/model_helpers/check_submission/__init__.py +0 -0
- brainscore_vision/model_helpers/check_submission/check_models.py +87 -0
- brainscore_vision/model_helpers/check_submission/images/1.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/10.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/11.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/12.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/13.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/14.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/15.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/16.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/17.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/18.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/19.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/2.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/20.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/3.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/4.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/5.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/6.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/7.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/8.png +0 -0
- brainscore_vision/model_helpers/check_submission/images/9.png +0 -0
- brainscore_vision/model_helpers/conftest.py +3 -0
- brainscore_vision/model_helpers/generic_plugin_tests.py +119 -0
- brainscore_vision/model_helpers/s3.py +62 -0
- brainscore_vision/model_helpers/utils/__init__.py +15 -0
- brainscore_vision/model_helpers/utils/s3.py +42 -0
- brainscore_vision/model_interface.py +214 -0
- brainscore_vision/models/AdvProp_efficientne_b6/__init__.py +5 -0
- brainscore_vision/models/AdvProp_efficientne_b6/model.py +75 -0
- brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt +1 -0
- brainscore_vision/models/AdvProp_efficientne_b6/test.py +9 -0
- brainscore_vision/models/AlexNet_SIN/__init__.py +8 -0
- brainscore_vision/models/AlexNet_SIN/model.py +29 -0
- brainscore_vision/models/AlexNet_SIN/requirements.txt +2 -0
- brainscore_vision/models/AlexNet_SIN/test.py +1 -0
- brainscore_vision/models/Soumyadeep_inf_1/__init__.py +5 -0
- brainscore_vision/models/Soumyadeep_inf_1/model.py +60 -0
- brainscore_vision/models/Soumyadeep_inf_1/setup.py +26 -0
- brainscore_vision/models/Soumyadeep_inf_1/test.py +1 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py +8 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/model.py +43 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +4 -0
- brainscore_vision/models/ViT_L_32_imagenet1k/test.py +8 -0
- brainscore_vision/models/__init__.py +0 -0
- brainscore_vision/models/alexnet/__init__.py +8 -0
- brainscore_vision/models/alexnet/model.py +28 -0
- brainscore_vision/models/alexnet/requirements.txt +2 -0
- brainscore_vision/models/alexnet/test.py +15 -0
- brainscore_vision/models/alexnet_7be5be79/__init__.py +7 -0
- brainscore_vision/models/alexnet_7be5be79/model.py +44 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +26 -0
- brainscore_vision/models/alexnet_7be5be79/test.py +1 -0
- brainscore_vision/models/alexnet_7be5be79_convs/__init__.py +5 -0
- brainscore_vision/models/alexnet_7be5be79_convs/model.py +42 -0
- brainscore_vision/models/alexnet_7be5be79_convs/setup.py +25 -0
- brainscore_vision/models/alexnet_7be5be79_convs/test.py +1 -0
- brainscore_vision/models/alexnet_ks_torevert/__init__.py +8 -0
- brainscore_vision/models/alexnet_ks_torevert/model.py +28 -0
- brainscore_vision/models/alexnet_ks_torevert/requirements.txt +2 -0
- brainscore_vision/models/alexnet_ks_torevert/test.py +15 -0
- brainscore_vision/models/alexnet_simclr_run1/__init__.py +7 -0
- brainscore_vision/models/alexnet_simclr_run1/model.py +267 -0
- brainscore_vision/models/alexnet_simclr_run1/requirements.txt +2 -0
- brainscore_vision/models/alexnet_simclr_run1/test.py +1 -0
- brainscore_vision/models/alexnet_testing/__init__.py +8 -0
- brainscore_vision/models/alexnet_testing/model.py +28 -0
- brainscore_vision/models/alexnet_testing/requirements.txt +2 -0
- brainscore_vision/models/alexnet_testing/setup.py +24 -0
- brainscore_vision/models/alexnet_testing/test.py +15 -0
- brainscore_vision/models/antialias_resnet152/__init__.py +7 -0
- brainscore_vision/models/antialias_resnet152/model.py +35 -0
- brainscore_vision/models/antialias_resnet152/requirements.txt +3 -0
- brainscore_vision/models/antialias_resnet152/test.py +8 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py +7 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/model.py +35 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt +1 -0
- brainscore_vision/models/antialiased_rnext101_32x8d/test.py +8 -0
- brainscore_vision/models/bp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/bp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/bp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/bp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/clip/__init__.py +5 -0
- brainscore_vision/models/clip/model.py +179 -0
- brainscore_vision/models/clip/requirements.txt +4 -0
- brainscore_vision/models/clip/test.py +1 -0
- brainscore_vision/models/clipvision/__init__.py +5 -0
- brainscore_vision/models/clipvision/model.py +179 -0
- brainscore_vision/models/clipvision/requirements.txt +4 -0
- brainscore_vision/models/clipvision/test.py +1 -0
- brainscore_vision/models/cornet_s/__init__.py +8 -0
- brainscore_vision/models/cornet_s/helpers/helpers.py +215 -0
- brainscore_vision/models/cornet_s/model.py +77 -0
- brainscore_vision/models/cornet_s/requirements.txt +7 -0
- brainscore_vision/models/cornet_s/test.py +8 -0
- brainscore_vision/models/cornet_s_ynshah/__init__.py +388 -0
- brainscore_vision/models/cornet_s_ynshah/model.py +192 -0
- brainscore_vision/models/cornet_s_ynshah/setup.py +24 -0
- brainscore_vision/models/cornet_s_ynshah/test.py +0 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py +7 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/model.py +75 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt +4 -0
- brainscore_vision/models/custom_model_cv_18_dagger_408/test.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/__init__.py +8 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/model.py +57 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/requirements.txt +3 -0
- brainscore_vision/models/cv_18_dagger_408_pretrained/test.py +25 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py +134 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/dbp_resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/dbp_resnet50_julios/model.py +52 -0
- brainscore_vision/models/dbp_resnet50_julios/setup.py +24 -0
- brainscore_vision/models/dbp_resnet50_julios/test.py +1 -0
- brainscore_vision/models/densenet_201_pytorch/__init__.py +7 -0
- brainscore_vision/models/densenet_201_pytorch/model.py +59 -0
- brainscore_vision/models/densenet_201_pytorch/requirements.txt +3 -0
- brainscore_vision/models/densenet_201_pytorch/test.py +8 -0
- brainscore_vision/models/eBarlow_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla/model.py +50 -0
- brainscore_vision/models/eBarlow_Vanilla/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_Vanilla/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/model.py +84 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_Vanilla_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eBarlow_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_linear_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_linear_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_linear_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_linear_1/test.py +1 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/model.py +65 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_augself_mlp_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_001_3/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_001_3/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_001_3/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_001_3/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01/model.py +50 -0
- brainscore_vision/models/eBarlow_lmda_01/requirements.txt +2 -0
- brainscore_vision/models/eBarlow_lmda_01/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/model.py +84 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1000ep/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_1_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/model.py +85 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/setup.py +25 -0
- brainscore_vision/models/eBarlow_lmda_02_200_full/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eBarlow_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eBarlow_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eBarlow_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eBarlow_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Mom_lmda_05_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla/model.py +50 -0
- brainscore_vision/models/eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_VanillaV2/model.py +50 -0
- brainscore_vision/models/eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eMMCR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eMMCR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01V2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01V2/model.py +50 -0
- brainscore_vision/models/eMMCR_lmda_01V2/requirements.txt +2 -0
- brainscore_vision/models/eMMCR_lmda_01V2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01V2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eMMCR_lmda_01_3/__init__.py +9 -0
- brainscore_vision/models/eMMCR_lmda_01_3/model.py +65 -0
- brainscore_vision/models/eMMCR_lmda_01_3/setup.py +24 -0
- brainscore_vision/models/eMMCR_lmda_01_3/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/model.py +64 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_Vanilla_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_0001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_001_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_01_2/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_02_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_03_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_04_1_1/test.py +1 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/__init__.py +9 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/model.py +65 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +24 -0
- brainscore_vision/models/eSimCLR_lmda_05_1/test.py +1 -0
- brainscore_vision/models/effnetb1_272x240/__init__.py +5 -0
- brainscore_vision/models/effnetb1_272x240/model.py +126 -0
- brainscore_vision/models/effnetb1_272x240/requirements.txt +3 -0
- brainscore_vision/models/effnetb1_272x240/test.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py +111 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt +6 -0
- brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +142 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py +9 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py +140 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt +5 -0
- brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py +8 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py +5 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/model.py +62 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt +3 -0
- brainscore_vision/models/focalnet_tiny_in1k_submission/test.py +8 -0
- brainscore_vision/models/hmax/__init__.py +7 -0
- brainscore_vision/models/hmax/helpers/hmax.py +438 -0
- brainscore_vision/models/hmax/helpers/pytorch.py +216 -0
- brainscore_vision/models/hmax/model.py +69 -0
- brainscore_vision/models/hmax/requirements.txt +5 -0
- brainscore_vision/models/hmax/test.py +8 -0
- brainscore_vision/models/inception_v3_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v3_pytorch/model.py +68 -0
- brainscore_vision/models/inception_v3_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v3_pytorch/test.py +8 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py +7 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +60 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt +3 -0
- brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py +8 -0
- brainscore_vision/models/mobilevit_small/__init__.py +7 -0
- brainscore_vision/models/mobilevit_small/model.py +49 -0
- brainscore_vision/models/mobilevit_small/requirements.txt +3 -0
- brainscore_vision/models/mobilevit_small/test.py +8 -0
- brainscore_vision/models/pixels/__init__.py +8 -0
- brainscore_vision/models/pixels/model.py +35 -0
- brainscore_vision/models/pixels/test.py +15 -0
- brainscore_vision/models/pnasnet_large_pytorch/__init__.py +7 -0
- brainscore_vision/models/pnasnet_large_pytorch/model.py +59 -0
- brainscore_vision/models/pnasnet_large_pytorch/requirements.txt +3 -0
- brainscore_vision/models/pnasnet_large_pytorch/test.py +8 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py +64 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py +65 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/__init__.py +9 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/model.py +67 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/setup.py +25 -0
- brainscore_vision/models/r101_eBarlow_lmda_02_1_copy/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_Vanilla_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_01_1/test.py +1 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/__init__.py +9 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/model.py +66 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/setup.py +25 -0
- brainscore_vision/models/r34_eMMCR_Mom_lmda_02_1/test.py +1 -0
- brainscore_vision/models/r50_tvpt/__init__.py +9 -0
- brainscore_vision/models/r50_tvpt/model.py +47 -0
- brainscore_vision/models/r50_tvpt/setup.py +24 -0
- brainscore_vision/models/r50_tvpt/test.py +1 -0
- brainscore_vision/models/regnet/__init__.py +14 -0
- brainscore_vision/models/regnet/model.py +17 -0
- brainscore_vision/models/regnet/requirements.txt +2 -0
- brainscore_vision/models/regnet/test.py +17 -0
- brainscore_vision/models/resnet18_imagenet21kP/__init__.py +6 -0
- brainscore_vision/models/resnet18_imagenet21kP/model.py +119 -0
- brainscore_vision/models/resnet18_imagenet21kP/setup.py +18 -0
- brainscore_vision/models/resnet18_imagenet21kP/test.py +0 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/__init__.py +5 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/model.py +59 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_Vanilla/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_VanillaV2/test.py +1 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/__init__.py +9 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/model.py +72 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +24 -0
- brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/test.py +1 -0
- brainscore_vision/models/resnet50_julios/__init__.py +5 -0
- brainscore_vision/models/resnet50_julios/model.py +54 -0
- brainscore_vision/models/resnet50_julios/setup.py +24 -0
- brainscore_vision/models/resnet50_julios/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/__init__.py +5 -0
- brainscore_vision/models/resnet50_tutorial/model.py +34 -0
- brainscore_vision/models/resnet50_tutorial/requirements.txt +2 -0
- brainscore_vision/models/resnet50_tutorial/test.py +8 -0
- brainscore_vision/models/resnet_152_v2_pytorch/__init__.py +7 -0
- brainscore_vision/models/resnet_152_v2_pytorch/model.py +59 -0
- brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt +2 -0
- brainscore_vision/models/resnet_152_v2_pytorch/test.py +8 -0
- brainscore_vision/models/resnet_50_robust/__init__.py +7 -0
- brainscore_vision/models/resnet_50_robust/model.py +55 -0
- brainscore_vision/models/resnet_50_robust/requirements.txt +3 -0
- brainscore_vision/models/resnet_50_robust/test.py +8 -0
- brainscore_vision/models/resnext101_32x16d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x16d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x16d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x32d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x32d_wsl/model.py +40 -0
- brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x32d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x48d_wsl/model.py +38 -0
- brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt +3 -0
- brainscore_vision/models/resnext101_32x48d_wsl/test.py +8 -0
- brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +7 -0
- brainscore_vision/models/resnext101_32x8d_wsl/model.py +44 -0
- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +2 -0
- brainscore_vision/models/resnext101_32x8d_wsl/test.py +8 -0
- brainscore_vision/models/temporal_model_AVID_CMA/__init__.py +17 -0
- brainscore_vision/models/temporal_model_AVID_CMA/model.py +92 -0
- brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_AVID_CMA/test.py +18 -0
- brainscore_vision/models/temporal_model_GDT/__init__.py +16 -0
- brainscore_vision/models/temporal_model_GDT/model.py +72 -0
- brainscore_vision/models/temporal_model_GDT/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_GDT/test.py +17 -0
- brainscore_vision/models/temporal_model_S3D_text_video/__init__.py +14 -0
- brainscore_vision/models/temporal_model_S3D_text_video/model.py +65 -0
- brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt +1 -0
- brainscore_vision/models/temporal_model_S3D_text_video/test.py +15 -0
- brainscore_vision/models/temporal_model_SeLaVi/__init__.py +17 -0
- brainscore_vision/models/temporal_model_SeLaVi/model.py +68 -0
- brainscore_vision/models/temporal_model_SeLaVi/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_SeLaVi/test.py +18 -0
- brainscore_vision/models/temporal_model_VideoMAE/__init__.py +15 -0
- brainscore_vision/models/temporal_model_VideoMAE/model.py +100 -0
- brainscore_vision/models/temporal_model_VideoMAE/requirements.txt +6 -0
- brainscore_vision/models/temporal_model_VideoMAE/test.py +16 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py +14 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/model.py +109 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt +4 -0
- brainscore_vision/models/temporal_model_VideoMAEv2/test.py +16 -0
- brainscore_vision/models/temporal_model_mae_st/__init__.py +15 -0
- brainscore_vision/models/temporal_model_mae_st/model.py +120 -0
- brainscore_vision/models/temporal_model_mae_st/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_mae_st/test.py +16 -0
- brainscore_vision/models/temporal_model_mmaction2/__init__.py +23 -0
- brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv +24 -0
- brainscore_vision/models/temporal_model_mmaction2/model.py +226 -0
- brainscore_vision/models/temporal_model_mmaction2/requirements.txt +5 -0
- brainscore_vision/models/temporal_model_mmaction2/test.py +24 -0
- brainscore_vision/models/temporal_model_openstl/__init__.py +18 -0
- brainscore_vision/models/temporal_model_openstl/model.py +206 -0
- brainscore_vision/models/temporal_model_openstl/requirements.txt +3 -0
- brainscore_vision/models/temporal_model_openstl/test.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/__init__.py +19 -0
- brainscore_vision/models/temporal_model_torchvision/model.py +92 -0
- brainscore_vision/models/temporal_model_torchvision/requirements.txt +2 -0
- brainscore_vision/models/temporal_model_torchvision/test.py +20 -0
- brainscore_vision/models/tv_efficientnet_b1/__init__.py +5 -0
- brainscore_vision/models/tv_efficientnet_b1/model.py +54 -0
- brainscore_vision/models/tv_efficientnet_b1/setup.py +24 -0
- brainscore_vision/models/tv_efficientnet_b1/test.py +1 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py +7 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/model.py +104 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/test.py +8 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE +674 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +105 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py +136 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py +41 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py +383 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py +71 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py +337 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +126 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py +100 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py +32 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py +68 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb +352 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_0/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py +66 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py +7 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py +68 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +24 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py +1 -0
- brainscore_vision/submission/__init__.py +0 -0
- brainscore_vision/submission/actions_helpers.py +153 -0
- brainscore_vision/submission/config.py +7 -0
- brainscore_vision/submission/endpoints.py +58 -0
- brainscore_vision/utils/__init__.py +91 -0
- brainscore_vision-2.1.dist-info/LICENSE +11 -0
- brainscore_vision-2.1.dist-info/METADATA +152 -0
- brainscore_vision-2.1.dist-info/RECORD +1009 -0
- brainscore_vision-2.1.dist-info/WHEEL +5 -0
- brainscore_vision-2.1.dist-info/top_level.txt +4 -0
- docs/Makefile +20 -0
- docs/source/conf.py +78 -0
- docs/source/index.rst +21 -0
- docs/source/modules/api_reference.rst +10 -0
- docs/source/modules/benchmarks.rst +8 -0
- docs/source/modules/brainscore_submission.png +0 -0
- docs/source/modules/developer_clarifications.rst +36 -0
- docs/source/modules/metrics.rst +8 -0
- docs/source/modules/model_interface.rst +8 -0
- docs/source/modules/submission.rst +112 -0
- docs/source/modules/tutorial_screenshots/brain-score_logo.png +0 -0
- docs/source/modules/tutorial_screenshots/final_submit.png +0 -0
- docs/source/modules/tutorial_screenshots/init_py.png +0 -0
- docs/source/modules/tutorial_screenshots/mms.png +0 -0
- docs/source/modules/tutorial_screenshots/setup.png +0 -0
- docs/source/modules/tutorial_screenshots/sms.png +0 -0
- docs/source/modules/tutorial_screenshots/subfolders.png +0 -0
- docs/source/modules/utils.rst +22 -0
- migrations/2020-12-20_pkl_to_nc.py +90 -0
- tests/__init__.py +6 -0
- tests/conftest.py +26 -0
- tests/test_benchmark_helpers/__init__.py +0 -0
- tests/test_benchmark_helpers/test_screen.py +75 -0
- tests/test_examples.py +41 -0
- tests/test_integration.py +43 -0
- tests/test_metric_helpers/__init__.py +0 -0
- tests/test_metric_helpers/test_temporal.py +80 -0
- tests/test_metric_helpers/test_transformations.py +171 -0
- tests/test_metric_helpers/test_xarray_utils.py +85 -0
- tests/test_model_helpers/__init__.py +6 -0
- tests/test_model_helpers/activations/__init__.py +0 -0
- tests/test_model_helpers/activations/test___init__.py +404 -0
- tests/test_model_helpers/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/brain_transformation/test___init__.py +18 -0
- tests/test_model_helpers/brain_transformation/test_behavior.py +181 -0
- tests/test_model_helpers/brain_transformation/test_neural.py +70 -0
- tests/test_model_helpers/brain_transformation/test_temporal.py +66 -0
- tests/test_model_helpers/temporal/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/__init__.py +0 -0
- tests/test_model_helpers/temporal/activations/test_extractor.py +96 -0
- tests/test_model_helpers/temporal/activations/test_inferencer.py +189 -0
- tests/test_model_helpers/temporal/activations/test_inputs.py +103 -0
- tests/test_model_helpers/temporal/brain_transformation/__init__.py +0 -0
- tests/test_model_helpers/temporal/brain_transformation/test_temporal_ops.py +122 -0
- tests/test_model_helpers/temporal/test_utils.py +61 -0
- tests/test_model_helpers/test_generic_plugin_tests.py +310 -0
- tests/test_model_helpers/test_imports.py +10 -0
- tests/test_model_helpers/test_s3.py +38 -0
- tests/test_models.py +15 -0
- tests/test_stimuli.py +0 -0
- tests/test_submission/__init__.py +0 -0
- tests/test_submission/mock_config.py +3 -0
- tests/test_submission/test_actions_helpers.py +67 -0
- tests/test_submission/test_db.py +54 -0
- tests/test_submission/test_endpoints.py +125 -0
- tests/test_utils.py +21 -0
@@ -0,0 +1,635 @@
import copy
import os
import cv2
import tempfile
from typing import Dict, Tuple, List, Union

import functools
import logging
from collections import OrderedDict
from multiprocessing.pool import ThreadPool

import numpy as np
from tqdm.auto import tqdm
import xarray as xr

from brainio.assemblies import NeuroidAssembly, walk_coords
from brainio.stimuli import StimulusSet
from brainscore_vision.model_helpers.utils import fullname
from result_caching import store_xarray


class Defaults:
    batch_size = 64


class ActivationsExtractorHelper:
    def __init__(self, get_activations, preprocessing, identifier=False, batch_size=Defaults.batch_size):
        """
        :param identifier: an activations identifier for the stored results file. False to disable saving.
        """
        self._logger = logging.getLogger(fullname(self))

        self._batch_size = batch_size
        self.identifier = identifier
        self.get_activations = get_activations
        self.preprocess = preprocessing or (lambda x: x)
        self._stimulus_set_hooks = {}
        self._batch_activations_hooks = {}
        self._microsaccade_helper = MicrosaccadeHelper()

    def __call__(self, stimuli, layers, stimuli_identifier=None, number_of_trials: int = 1,
                 require_variance: bool = False):
        """
        :param stimuli_identifier: a stimuli identifier for the stored results file. False to disable saving.
        :param number_of_trials: an integer that determines how many repetitions of the same stimulus the model
            performs.
        :param require_variance: a bool that asks models to output different responses to the same stimuli (i.e.,
            allows stochastic responses to identical stimuli, even in otherwise deterministic base models).
            We here implement this using microsaccades. For more, see ...
        """
        if require_variance:
            self._microsaccade_helper.number_of_trials = number_of_trials  # for use with microsaccades
        if (self._microsaccade_helper.visual_degrees is None) and require_variance:
            self._logger.debug("When using microsaccades for model commitments other than ModelCommitment, you should "
                               "set self.activations_model.set_visual_degrees(visual_degrees). Not doing so risks "
                               "breaking microsaccades.")
        if isinstance(stimuli, StimulusSet):
            function_call = functools.partial(self.from_stimulus_set, stimulus_set=stimuli)
        else:
            function_call = functools.partial(self.from_paths, stimuli_paths=stimuli)
        return function_call(
            layers=layers,
            stimuli_identifier=stimuli_identifier,
            require_variance=require_variance)

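    # Editor's note: an illustrative usage sketch, not part of the packaged module. Names such as
    # `my_get_activations`, `my_preprocessing`, and `my_stimulus_set` are hypothetical stand-ins for
    # what a model wrapper would supply:
    #
    #     >>> extractor = ActivationsExtractorHelper(get_activations=my_get_activations,
    #     ...                                        preprocessing=my_preprocessing,
    #     ...                                        identifier='my-model')
    #     >>> extractor.set_visual_degrees(8)  # required for require_variance=True (microsaccades)
    #     >>> assembly = extractor(my_stimulus_set, layers=['layer1', 'layer2'],
    #     ...                      number_of_trials=10, require_variance=True)
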
    def from_stimulus_set(self, stimulus_set, layers, stimuli_identifier=None, require_variance: bool = False):
        """
        :param stimuli_identifier: a stimuli identifier for the stored results file.
            False to disable saving. None to use `stimulus_set.identifier`
        """
        if stimuli_identifier is None and hasattr(stimulus_set, 'identifier'):
            stimuli_identifier = stimulus_set.identifier
        for hook in self._stimulus_set_hooks.copy().values():  # copy to avoid stale handles
            stimulus_set = hook(stimulus_set)
        stimuli_paths = [str(stimulus_set.get_stimulus(stimulus_id)) for stimulus_id in stimulus_set['stimulus_id']]
        activations = self.from_paths(stimuli_paths=stimuli_paths, layers=layers,
                                      stimuli_identifier=stimuli_identifier, require_variance=require_variance)
        activations = attach_stimulus_set_meta(activations,
                                               stimulus_set,
                                               number_of_trials=self._microsaccade_helper.number_of_trials,
                                               require_variance=require_variance)
        return activations

    def from_paths(self, stimuli_paths, layers, stimuli_identifier=None, require_variance=None):
        if layers is None:
            layers = ['logits']
        if self.identifier and stimuli_identifier:
            fnc = functools.partial(self._from_paths_stored,
                                    identifier=self.identifier,
                                    stimuli_identifier=stimuli_identifier,
                                    require_variance=require_variance)
        else:
            self._logger.debug(f"self.identifier `{self.identifier}` or stimuli_identifier {stimuli_identifier} "
                               f"are not set, will not store")
            fnc = self._from_paths
        if require_variance:
            activations = fnc(layers=layers, stimuli_paths=stimuli_paths, require_variance=require_variance)
        else:
            # When we are not asked for varying responses but receive `stimuli_paths` duplicates (e.g. multiple
            # trials), we first reduce them to only the paths that need to be run individually, compute activations
            # for those, and then expand the activations to all paths again. This is done here, before storing,
            # so that we only store the reduced activations.
            reduced_paths = self._reduce_paths(stimuli_paths)
            activations = fnc(layers=layers, stimuli_paths=reduced_paths, require_variance=require_variance)
            activations = self._expand_paths(activations, original_paths=stimuli_paths)
        return activations

    @store_xarray(identifier_ignore=['stimuli_paths', 'layers'], combine_fields={'layers': 'layer'})
    def _from_paths_stored(self, identifier, layers, stimuli_identifier,
                           stimuli_paths, number_of_trials: int = 1, require_variance: bool = False):
        return self._from_paths(layers=layers, stimuli_paths=stimuli_paths, require_variance=require_variance)

    def _from_paths(self, layers, stimuli_paths, require_variance: bool = False):
        if len(layers) == 0:
            raise ValueError("No layers passed to retrieve activations from")
        self._logger.info('Running stimuli')
        layer_activations = self._get_activations_batched(stimuli_paths, layers=layers, batch_size=self._batch_size,
                                                          require_variance=require_variance)
        self._logger.info('Packaging into assembly')
        return self._package(layer_activations=layer_activations, stimuli_paths=stimuli_paths,
                             require_variance=require_variance)

    def _reduce_paths(self, stimuli_paths):
        return list(set(stimuli_paths))

    def _expand_paths(self, activations, original_paths):
        activations_paths = activations['stimulus_path'].values
        argsort_indices = np.argsort(activations_paths)
        sorted_x = activations_paths[argsort_indices]
        sorted_index = np.searchsorted(sorted_x, original_paths)
        index = [argsort_indices[i] for i in sorted_index]
        return activations[{'presentation': index}]

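    # Editor's note: an illustrative sketch of the reduce/expand round-trip above, not part of the
    # packaged module. Duplicated paths are computed once and the result is re-indexed back to the
    # original presentation order:
    #
    #     >>> paths = ['a.png', 'b.png', 'a.png']        # three presentations, two unique stimuli
    #     >>> reduced = extractor._reduce_paths(paths)   # ['a.png', 'b.png'] in arbitrary order
    #     >>> activations = ...                          # assembly with one row per unique path
    #     >>> expanded = extractor._expand_paths(activations, original_paths=paths)  # three rows again
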
    def register_batch_activations_hook(self, hook):
        r"""
        The hook will be called every time a batch of activations is retrieved.
        The hook should have the following signature::

            hook(batch_activations) -> batch_activations

        The hook should return new batch_activations which will be used in place of the previous ones.
        """

        handle = HookHandle(self._batch_activations_hooks)
        self._batch_activations_hooks[handle.id] = hook
        return handle

    def register_stimulus_set_hook(self, hook):
        r"""
        The hook will be called every time before a stimulus set is processed.
        The hook should have the following signature::

            hook(stimulus_set) -> stimulus_set

        The hook should return a new stimulus_set which will be used in place of the previous one.
        """

        handle = HookHandle(self._stimulus_set_hooks)
        self._stimulus_set_hooks[handle.id] = hook
        return handle

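    # Editor's note: an illustrative hook example, not part of the packaged module. A registered hook
    # receives each batch's OrderedDict of layer activations and must return a dict of the same form;
    # the returned `HookHandle` (defined below) detaches it again:
    #
    #     >>> def scale_hook(batch_activations):
    #     ...     return OrderedDict((layer, values * 2) for layer, values in batch_activations.items())
    #     >>> handle = extractor.register_batch_activations_hook(scale_hook)
    #     >>> handle.remove()  # detach when done
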
    def _get_activations_batched(self, paths, layers, batch_size: int, require_variance: bool):
        layer_activations = OrderedDict()
        for batch_start in tqdm(range(0, len(paths), batch_size), unit_scale=batch_size, desc="activations"):
            batch_end = min(batch_start + batch_size, len(paths))
            batch_inputs = paths[batch_start:batch_end]

            batch_activations = OrderedDict()
            # compute activations on the entire batch one microsaccade shift at a time.
            for shift_number in range(self._microsaccade_helper.number_of_trials):
                activations = self._get_batch_activations(inputs=batch_inputs,
                                                          layer_names=layers,
                                                          batch_size=batch_size,
                                                          require_variance=require_variance,
                                                          trial_number=shift_number)

                for layer_name, layer_output in activations.items():
                    batch_activations.setdefault(layer_name, []).append(layer_output)

            # concatenate all microsaccade shifts in this batch (for example, if the model microsaccaded 15 times,
            # the 15 microsaccaded layer_outputs are concatenated to the batch here).
            for layer_name, layer_outputs in batch_activations.items():
                batch_activations[layer_name] = np.concatenate(layer_outputs)

            for hook in self._batch_activations_hooks.copy().values():
                batch_activations = hook(batch_activations)

            # add this batch to layer_activations
            for layer_name, layer_output in batch_activations.items():
                layer_activations.setdefault(layer_name, []).append(layer_output)

        # concat all batches
        for layer_name, layer_outputs in layer_activations.items():
            layer_activations[layer_name] = np.concatenate(layer_outputs)

        return layer_activations  # this is all batches

    def _get_batch_activations(self, inputs, layer_names, batch_size: int, require_variance: bool = False,
                               trial_number: int = 1):
        inputs, num_padding = self._pad(inputs, batch_size)
        preprocessed_inputs = self.preprocess(inputs)
        preprocessed_inputs = self._microsaccade_helper.translate_images(images=preprocessed_inputs,
                                                                         image_paths=inputs,
                                                                         trial_number=trial_number,
                                                                         require_variance=require_variance)
        activations = self.get_activations(preprocessed_inputs, layer_names)
        assert isinstance(activations, OrderedDict)
        activations = self._unpad(activations, num_padding)
        if require_variance:
            self._microsaccade_helper.remove_temporary_files(preprocessed_inputs)
        return activations

    def set_visual_degrees(self, visual_degrees: float):
        """
        A method used by ModelCommitments to give the ActivationsExtractorHelper's MicrosaccadeHelper its
        visual degrees for performing microsaccades.
        """
        self._microsaccade_helper.visual_degrees = visual_degrees

    def _pad(self, batch_images, batch_size):
        num_images = len(batch_images)
        if num_images % batch_size == 0:
            return batch_images, 0
        num_padding = batch_size - (num_images % batch_size)
        padding = np.repeat(batch_images[-1:], repeats=num_padding, axis=0)
        return np.concatenate((batch_images, padding)), num_padding

    def _unpad(self, layer_activations, num_padding):
        return change_dict(layer_activations, lambda values: values[:-num_padding or None])

    def _package(self, layer_activations, stimuli_paths, require_variance: bool):
        shapes = [a.shape for a in layer_activations.values()]
        self._logger.debug(f"Activations shapes: {shapes}")
        self._logger.debug("Packaging individual layers")
        layer_assemblies = [self._package_layer(single_layer_activations,
                                                layer=layer,
                                                stimuli_paths=stimuli_paths,
                                                require_variance=require_variance) for
                            layer, single_layer_activations in tqdm(layer_activations.items(), desc='layer packaging')]
        # merge manually instead of using merge_data_arrays since `xarray.merge` is very slow with these large arrays
        # complication: (non)neuroid_coords are taken from the structure of layer_assemblies[0] i.e. the 1st assembly;
        # using these names/keys for all assemblies results in KeyError if the first layer contains flatten_coord_names
        # (see _package_layer) not present in later layers, e.g. first layer = conv, later layer = transformer layer
        self._logger.debug(f"Merging {len(layer_assemblies)} layer assemblies")
        model_assembly = np.concatenate([a.values for a in layer_assemblies],
                                        axis=layer_assemblies[0].dims.index('neuroid'))
        nonneuroid_coords = {coord: (dims, values) for coord, dims, values in walk_coords(layer_assemblies[0])
                             if set(dims) != {'neuroid'}}
        neuroid_coords = {coord: [dims, values] for coord, dims, values in walk_coords(layer_assemblies[0])
                          if set(dims) == {'neuroid'}}
        for layer_assembly in layer_assemblies[1:]:
            for coord in neuroid_coords:
                neuroid_coords[coord][1] = np.concatenate((neuroid_coords[coord][1], layer_assembly[coord].values))
            assert layer_assemblies[0].dims == layer_assembly.dims
            for coord, dims, values in walk_coords(layer_assembly):
                if set(dims) == {'neuroid'}:
                    continue
                assert (values == nonneuroid_coords[coord][1]).all()

        neuroid_coords = {coord: (dims_values[0], dims_values[1])  # re-package as tuple instead of list for xarray
                          for coord, dims_values in neuroid_coords.items()}
        model_assembly = type(layer_assemblies[0])(model_assembly, coords={**nonneuroid_coords, **neuroid_coords},
                                                   dims=layer_assemblies[0].dims)
        return model_assembly

    def _package_layer(self, layer_activations: np.ndarray, layer: str, stimuli_paths: List[str],
                       require_variance: bool = False):
        # if variance in responses is required from the model, the activation shape is larger
        # by a factor of number_of_trials
        if require_variance:
            runs_per_image = self._microsaccade_helper.number_of_trials
        else:
            runs_per_image = 1
        assert layer_activations.shape[0] == len(stimuli_paths) * runs_per_image
        stimuli_paths = np.repeat(stimuli_paths, runs_per_image)
        activations, flatten_indices = flatten(layer_activations, return_index=True)  # collapse for single neuroid dim
        flatten_coord_names = None
        if flatten_indices.shape[1] == 1:  # fully connected, e.g. classifier
            # see comment in _package for an explanation why we cannot simply have 'channel' for the FC layer
            flatten_coord_names = ['channel', 'channel_x', 'channel_y']
        elif flatten_indices.shape[1] == 2:  # Transformer, e.g. ViT
            flatten_coord_names = ['channel', 'embedding']
        elif flatten_indices.shape[1] == 3:  # 2DConv, e.g. resnet
            flatten_coord_names = ['channel', 'channel_x', 'channel_y']
        elif flatten_indices.shape[1] == 4:  # temporal sliding window, e.g. Omnivore
            flatten_coord_names = ['channel_temporal', 'channel_x', 'channel_y', 'channel']
        else:
            # we still package the activations, but are unable to provide channel information
            self._logger.debug(f"Unknown layer activations shape {layer_activations.shape}, not inferring channels")

        # build assembly
        coords = {'stimulus_path': ('presentation', stimuli_paths),
                  **self._microsaccade_helper.build_microsaccade_coords(stimuli_paths),
                  'neuroid_num': ('neuroid', list(range(activations.shape[1]))),
                  'model': ('neuroid', [self.identifier] * activations.shape[1]),
                  'layer': ('neuroid', [layer] * activations.shape[1]),
                  }

        if flatten_coord_names:
            flatten_coords = {flatten_coord_names[i]: [sample_index[i] if i < flatten_indices.shape[1] else np.nan
                                                       for sample_index in flatten_indices]
                              for i in range(len(flatten_coord_names))}
            coords = {**coords, **{coord: ('neuroid', values) for coord, values in flatten_coords.items()}}
        layer_assembly = NeuroidAssembly(activations, coords=coords, dims=['presentation', 'neuroid'])
        neuroid_id = [".".join([f"{value}" for value in values]) for values in zip(*[
            layer_assembly[coord].values for coord in ['model', 'layer', 'neuroid_num']])]
        layer_assembly['neuroid_id'] = 'neuroid', neuroid_id
        return layer_assembly

    def insert_attrs(self, wrapper):
        wrapper.from_stimulus_set = self.from_stimulus_set
        wrapper.from_paths = self.from_paths
        wrapper.register_batch_activations_hook = self.register_batch_activations_hook
        wrapper.register_stimulus_set_hook = self.register_stimulus_set_hook


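# Editor's note: an illustrative sketch, not part of the packaged module. `_package` stacks all layers
# along the `neuroid` dim of a single 2D NeuroidAssembly (presentation x neuroid), so a single layer can
# be recovered by filtering on the `layer` coordinate:
#
#     >>> single_layer = assembly[{'neuroid': (assembly['layer'] == 'layer1').values}]
#     >>> assembly['neuroid_id'].values[0]  # composed as '<model>.<layer>.<neuroid_num>'
#     'my-model.layer1.0'
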
class MicrosaccadeHelper:
    """
    A class that allows ActivationsExtractorHelper to implement microsaccades.

    Human microsaccade amplitude varies by who you ask; an estimate might be <0.1 deg = 360 arcsec = 6 arcmin.
    Our motivation to make use of such microsaccades is to obtain multiple different neural activities for the
    same input stimulus from non-stochastic models. This enables models to engage with, e.g., psychophysical
    functions, which often require variance for the same stimulus. In the current implementation,
    if `require_variance=True`, the model microsaccades in the preprocessed input space in sub-pixel increments,
    the extent and position of which are determined by `self.visual_degrees` and
    `self.microsaccade_extent_degrees`.

    More information:
    --> Rolfs 2009 "Microsaccades: Small steps on a long way" Vision Research, Volume 49, Issue 20,
        15 October 2009, Pages 2415-2441.
    --> Haddad & Steinman 1973 "The smallest voluntary saccade: Implications for fixation" Vision
        Research, Volume 13, Issue 6, June 1973, Pages 1075-1086, IN5-IN6.
    Implemented by Ben Lonnqvist and Johannes Mehrer.
    """
    def __init__(self):
        self._logger = logging.getLogger(fullname(self))
        self.number_of_trials = 1  # for use with microsaccades.
        self.microsaccade_extent_degrees = 0.05  # how many degrees models microsaccade by default

        # A dict that contains two dicts, one representing microsaccades in pixels and one in degrees.
        # Each dict inside contains image paths and their respective microsaccades. For example:
        # {'pixels': {'abc.jpg': [(0, 0), (1.5, 2)]}, 'degrees': {'abc.jpg': [(0., 0.), (0.0075, 0.001)]}}
        self.microsaccades = {'pixels': {}, 'degrees': {}}
        # Model visual degrees. Used for computing microsaccades in the space of degrees rather than pixels.
        self.visual_degrees = None

    def translate_images(self, images: List[Union[str, np.ndarray]], image_paths: List[str], trial_number: int,
                         require_variance: bool) -> List[str]:
        """
        Translate images according to selected microsaccades, if microsaccades are required.

        :param images: A list of arrays.
        :param image_paths: A list of image paths. Both `image_paths` and `images` are needed since while both tf and
                            non-tf models preprocess images before this point, non-tf models' preprocessed images
                            are fixed as arrays when fed into here. As such, simply returning `image_paths` for
                            non-tf models would require double-loading of the images, which does not seem like a
                            good idea.
        """
        output_images = []
        for index, image_path in enumerate(image_paths):
            # When microsaccades are not used, skip computing them and return the base images.
            # This iteration could be entirely skipped, but recording microsaccades for all images regardless
            # of whether variance is required or not is convenient for adding an extra presentation dimension
            # in the layer assembly later, to keep track of as much metadata as possible, to avoid layer assembly
            # collapse, or to avoid otherwise extraneous mock dims.
            # The method could further be streamlined by calling `self.get_image_with_shape()` and
            # `self.select_microsaccade` for all images regardless of require_variance, but it seems like a bad
            # idea to introduce cv2 image loading for all models and images, regardless of whether they are
            # actually microsaccading.
            if not require_variance:
                self.microsaccades['pixels'][image_path] = [(0., 0.)]
                self.microsaccades['degrees'][image_path] = [(0., 0.)]
                output_images.append(images[index])
            else:
                # translate images according to microsaccades if we are using microsaccades
                image, image_shape, image_is_channels_first = self.get_image_with_shape(images[index])
                microsaccade_location_pixels = self.select_microsaccade(image_path=image_path,
                                                                        trial_number=trial_number,
                                                                        image_shape=image_shape)
                return_string = isinstance(images[index], str)
                output_images.append(self.translate_image(image=image,
                                                          microsaccade_location=microsaccade_location_pixels,
                                                          image_shape=image_shape,
                                                          return_string=return_string,
                                                          image_is_channels_first=image_is_channels_first))
        return self.reshape_microsaccaded_images(output_images)

    def translate_image(self, image: str, microsaccade_location: Tuple[float, float], image_shape: Tuple[int, int],
                        return_string: bool, image_is_channels_first: bool) -> str:
        """Translates the image and, if the model expects file paths, saves it to a temporary file."""
        translated_image = self.translate(image=image, shift=microsaccade_location, image_shape=image_shape,
                                          image_is_channels_first=image_is_channels_first)
        if not return_string:  # if the model accepts ndarrays after preprocessing, return one
            return translated_image
        else:  # if the model accepts strings after preprocessing, write a temp file
            temp_file_descriptor, temporary_fp = tempfile.mkstemp(suffix=".png")
            os.close(temp_file_descriptor)
            if not cv2.imwrite(temporary_fp, translated_image):
                raise Exception(f"cv2.imwrite failed: {temporary_fp}")
            return temporary_fp

    def select_microsaccade(self, image_path: str, trial_number: int, image_shape: Tuple[int, int]
                            ) -> Tuple[float, float]:
        """
        A function for generating a microsaccade location. The function returns a tuple of pixel shifts expanding from
        the center of the image.

        Microsaccade locations are placed within a circle, evenly distributed across the entire area in a spiral,
        from the center to the circumference. We keep track of microsaccades both on a pixel and visual angle basis,
        but only pixel values are returned. This is because shifting the image using cv2 requires pixel representation.
        """
        # if we did not already compute microsaccades for this image, we build them first.
        if image_path not in self.microsaccades['pixels']:
            self.build_microsaccades(image_path=image_path, image_shape=image_shape)
        return self.microsaccades['pixels'][image_path][trial_number]

    def build_microsaccades(self, image_path: str, image_shape: Tuple[int, int]):
        if image_shape[0] != image_shape[1]:
            self._logger.debug('Input image is not a square. Image dimension 0 is used to calculate the '
                               'extent of microsaccades.')

        assert self.visual_degrees is not None, (
            'self.visual_degrees is not set by the ModelCommitment, but microsaccades '
            'are in use. Set the activations_model visual degrees in your commitment after defining '
            'your activations_model. For example, self.activations_model.set_visual_degrees'
            '(visual_degrees). For detailed information, see '
            ':meth:`~brainscore_vision.model_helpers.activations.ActivationsExtractorHelper.'
            '__call__`.')
        # compute the maximum radius of microsaccade extent in pixel space
        radius_ratio = self.microsaccade_extent_degrees / self.visual_degrees
        max_radius = radius_ratio * image_shape[0]  # maximum radius in pixels, per microsaccade_extent_degrees

        selected_microsaccades = {'pixels': [], 'degrees': []}
        # microsaccades are placed in a spiral at sub-pixel increments
        a = max_radius / np.sqrt(self.number_of_trials)  # spiral coefficient to space microsaccades evenly
        for i in range(self.number_of_trials):
            r = np.sqrt(i / self.number_of_trials) * max_radius  # compute radial distance for the i-th point
            theta = a * np.sqrt(i) * 2 * np.pi / max_radius  # compute angle for the i-th point

            # convert polar coordinates to Cartesian, centered on the image
            x = r * np.cos(theta)
            y = r * np.sin(theta)

            pixels_per_degree = self.calculate_pixels_per_degree_in_image(image_shape[0])
            selected_microsaccades['pixels'].append((x, y))
            selected_microsaccades['degrees'].append(self.convert_pixels_to_degrees((x, y), pixels_per_degree))

        # store the selected shifts so that `select_microsaccade` can look them up by trial_number
        self.microsaccades['pixels'][image_path] = selected_microsaccades['pixels']
        self.microsaccades['degrees'][image_path] = selected_microsaccades['degrees']

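    # Editor's note: an illustrative worked example, not part of the packaged module. With
    # visual_degrees=8, microsaccade_extent_degrees=0.05, and a 224-pixel-wide image:
    #     max_radius = (0.05 / 8) * 224 = 1.4 pixels
    # and for number_of_trials=2, the spiral places shifts at radii
    #     r_0 = sqrt(0/2) * 1.4 = 0.0 and r_1 = sqrt(1/2) * 1.4 ≈ 0.99 pixels
    # from the image center, i.e. sub-pixel translations realized by cv2.warpAffine in `translate` below.
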
    def unpack_microsaccade_coords(self, stimuli_paths: np.ndarray, pixels_or_degrees: str, dim: int):
        """Unpacks microsaccades from stimuli_paths into a single list to conform with coord requirements."""
        assert pixels_or_degrees in ('pixels', 'degrees')
        unpacked_microsaccades = []
        for stimulus_path in stimuli_paths:
            for microsaccade in self.microsaccades[pixels_or_degrees][stimulus_path]:
                unpacked_microsaccades.append(microsaccade[dim])
        return unpacked_microsaccades

    def calculate_pixels_per_degree_in_image(self, image_width_pixels: int) -> float:
        """Calculates the pixels per degree in the image, basing the calculation on image width."""
        pixels_per_degree = image_width_pixels / self.visual_degrees
        return pixels_per_degree

    def build_microsaccade_coords(self, stimuli_paths: np.ndarray) -> Dict:
        return {
            'microsaccade_shift_x_pixels': ('presentation', self.unpack_microsaccade_coords(
                np.unique(stimuli_paths),
                pixels_or_degrees='pixels',
                dim=0)),
            'microsaccade_shift_y_pixels': ('presentation', self.unpack_microsaccade_coords(
                np.unique(stimuli_paths),
                pixels_or_degrees='pixels',
                dim=1)),
            'microsaccade_shift_x_degrees': ('presentation', self.unpack_microsaccade_coords(
                np.unique(stimuli_paths),
                pixels_or_degrees='degrees',
                dim=0)),
            'microsaccade_shift_y_degrees': ('presentation', self.unpack_microsaccade_coords(
                np.unique(stimuli_paths),
                pixels_or_degrees='degrees',
                dim=1))
        }

    @staticmethod
    def convert_pixels_to_degrees(pixel_coords: Tuple[float, float], pixels_per_degree: float) -> Tuple[float, float]:
        degrees_x = pixel_coords[0] / pixels_per_degree
        degrees_y = pixel_coords[1] / pixels_per_degree
        return degrees_x, degrees_y

    @staticmethod
    def remove_temporary_files(temporary_file_paths: List[str]) -> None:
        """
        This function is used to manually remove all temporary file paths. We do this instead of relying on
        implicit Python garbage collection to 1) ensure that tensorflow models have access to temporary files
        when needed, and 2) make the point at which temporary files are removed explicit.
        """
        for temporary_file_path in temporary_file_paths:
            if isinstance(temporary_file_path, str):  # do not try to remove loaded images
                try:
                    os.remove(temporary_file_path)
                except FileNotFoundError:
                    pass

    @staticmethod
    def translate(image: np.ndarray, shift: Tuple[float, float], image_shape: Tuple[int, int],
                  image_is_channels_first: bool) -> np.ndarray:
        rows, cols = image_shape
        # translation matrix
        M = np.float32([[1, 0, shift[0]], [0, 1, shift[1]]])

        if image_is_channels_first:
            image = np.transpose(image, (1, 2, 0))  # cv2 expects channels-last
        # Apply translation, filling new line(s) with the line(s) closest to them.
        translated_image = cv2.warpAffine(image, M, (cols, rows), flags=cv2.INTER_LINEAR,  # for sub-pixel shifts
                                          borderMode=cv2.BORDER_REPLICATE)
        if image_is_channels_first:
            translated_image = np.transpose(translated_image, (2, 0, 1))  # convert the image back to channels-first
        return translated_image

    @staticmethod
    def get_image_with_shape(image: np.ndarray) -> Tuple[np.ndarray, Tuple[int, int], bool]:
        _, rows, cols = image.shape  # pytorch uses channels, height, width
        image_is_channels_first = True
        return image, (rows, cols), image_is_channels_first

    @staticmethod
    def reshape_microsaccaded_images(images: List) -> Union[List[str], np.ndarray]:
        if any(isinstance(image, str) for image in images):
            return images
        return np.stack(images, axis=0)


def change_dict(d, change_function, keep_name=False, multithread=False):
    if not multithread:
        map_fnc = map
    else:
        pool = ThreadPool()
        map_fnc = pool.map

    def apply_change(layer_values):
        layer, values = layer_values
        values = change_function(values) if not keep_name else change_function(layer, values)
        return layer, values

    results = map_fnc(apply_change, d.items())
    results = OrderedDict(results)
    if multithread:
        pool.close()
    return results


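# Editor's note: an illustrative example, not part of the packaged module. `change_dict` maps a function
# over every value of a (layer -> activations) dict, optionally in a thread pool:
#
#     >>> d = OrderedDict(a=np.ones(3), b=np.zeros(3))
#     >>> change_dict(d, lambda values: values * 2)
#     OrderedDict([('a', array([2., 2., 2.])), ('b', array([0., 0., 0.]))])
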
def lstrip_local(path):
    parts = path.split(os.sep)
    try:
        start_index = parts.index('.brainio')
    except ValueError:  # not in list -- perhaps a custom directory
        return path
    path = os.sep.join(parts[start_index:])
    return path


def attach_stimulus_set_meta(assembly, stimulus_set, number_of_trials: int, require_variance: bool = False):
    stimulus_paths = [str(stimulus_set.get_stimulus(stimulus_id)) for stimulus_id in stimulus_set['stimulus_id']]
    stimulus_paths = [lstrip_local(path) for path in stimulus_paths]
    assembly_paths = [lstrip_local(path) for path in assembly['stimulus_path'].values]

    # when microsaccades are used, we repeat stimulus_paths number_of_trials times to correctly populate the dim
    if require_variance:
        replication_factor = number_of_trials
    else:
        replication_factor = 1
    repeated_stimulus_paths = np.repeat(stimulus_paths, replication_factor)
    assert (np.array(assembly_paths) == np.array(repeated_stimulus_paths)).all()
    repeated_stimulus_ids = np.repeat(stimulus_set['stimulus_id'].values, replication_factor)

    assembly = assembly.reset_index('presentation')
    assembly['stimulus_path'] = ('presentation', repeated_stimulus_ids)
    assembly = assembly.rename({'stimulus_path': 'stimulus_id'})

    all_columns = []
    for column in stimulus_set.columns:
        repeated_values = np.repeat(stimulus_set[column].values, replication_factor)
        assembly = assembly.assign_coords({column: ('presentation', repeated_values)})
        all_columns.append(column)

    presentation_coords = all_columns + [coord for coord, dims, values in walk_coords(assembly['presentation'])]
    assembly = assembly.set_index(presentation=list(set(presentation_coords)))  # assign MultiIndex
    return assembly


class HookHandle:
    next_id = 0

    def __init__(self, hook_dict):
        self.hook_dict = hook_dict
        self.id = HookHandle.next_id
        HookHandle.next_id += 1
        self._saved_hook = None

    def remove(self):
        hook = self.hook_dict[self.id]
        del self.hook_dict[self.id]
        return hook

    def disable(self):
        self._saved_hook = self.remove()

    def enable(self):
        self.hook_dict[self.id] = self._saved_hook
        self._saved_hook = None


def flatten(layer_output, return_index=False):
    flattened = layer_output.reshape(layer_output.shape[0], -1)
    if not return_index:
        return flattened

    def cartesian_product_broadcasted(*arrays):
        """
        http://stackoverflow.com/a/11146645/190597
        """
        broadcastable = np.ix_(*arrays)
        broadcasted = np.broadcast_arrays(*broadcastable)
        dtype = np.result_type(*arrays)
        rows, cols = functools.reduce(np.multiply, broadcasted[0].shape), len(broadcasted)
        out = np.empty(rows * cols, dtype=dtype)
        start, end = 0, rows
        for a in broadcasted:
            out[start:end] = a.reshape(-1)
            start, end = end, end + rows
        return out.reshape(cols, rows).T

    index = cartesian_product_broadcasted(*[np.arange(s, dtype='int') for s in layer_output.shape[1:]])
    return flattened, index
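
# Editor's note: an illustrative example, not part of the packaged module. `flatten` collapses all
# non-batch dims into a single neuroid dim and can report which original indices each neuroid came from:
#
#     >>> layer_output = np.zeros((10, 16, 7, 7))  # batch x channel x height x width
#     >>> flattened, index = flatten(layer_output, return_index=True)
#     >>> flattened.shape, index.shape
#     ((10, 784), (784, 3))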