brainscore-vision 2.1__py3-none-any.whl → 2.2.1__py3-none-any.whl
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +2 -1
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +2 -2
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +4 -4
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +2 -2
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50000 -50000
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +1 -1
- brainscore_vision/benchmarks/lonnqvist2024/__init__.py +8 -0
- brainscore_vision/benchmarks/lonnqvist2024/benchmark.py +125 -0
- brainscore_vision/benchmarks/lonnqvist2024/test.py +61 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +3 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +1 -1
- brainscore_vision/data/lonnqvist2024/__init__.py +47 -0
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +53 -0
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +61 -0
- brainscore_vision/data/lonnqvist2024/test.py +127 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +33 -0
- brainscore_vision/models/alexnet/region_layer_map/alexnet.json +1 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +4 -4
- brainscore_vision/models/alexnet_random/__init__.py +7 -0
- brainscore_vision/models/alexnet_random/model.py +46 -0
- brainscore_vision/models/alexnet_random/setup.py +26 -0
- brainscore_vision/models/alexnet_random/test.py +1 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/region_layer_map/cvt_cvt-13-384-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-13-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/region_layer_map/cvt_cvt-21-224-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/region_layer_map/cvt_cvt-21-384-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-21-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py +57 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt +5 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py +7 -0
- brainscore_vision/models/inception_v4_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v4_pytorch/model.py +64 -0
- brainscore_vision/models/inception_v4_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v4_pytorch/test.py +8 -0
- brainscore_vision/models/mvimgnet_ms_05/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ms_05/model.py +64 -0
- brainscore_vision/models/mvimgnet_ms_05/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ms_05/test.py +1 -0
- brainscore_vision/models/mvimgnet_rf/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_rf/model.py +64 -0
- brainscore_vision/models/mvimgnet_rf/setup.py +25 -0
- brainscore_vision/models/mvimgnet_rf/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_00/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_00/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_00/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_00/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_02/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_02/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_02/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_02/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_03/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_03/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_03/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_03/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_04/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_04/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_04/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_04/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_05/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_05/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_05/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_05/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/region_layer_map/resnet50_tutorial.json +1 -0
- brainscore_vision/models/sam_test_resnet/__init__.py +5 -0
- brainscore_vision/models/sam_test_resnet/model.py +26 -0
- brainscore_vision/models/sam_test_resnet/requirements.txt +2 -0
- brainscore_vision/models/sam_test_resnet/test.py +8 -0
- brainscore_vision/models/sam_test_resnet_4/__init__.py +5 -0
- brainscore_vision/models/sam_test_resnet_4/model.py +26 -0
- brainscore_vision/models/sam_test_resnet_4/requirements.txt +2 -0
- brainscore_vision/models/sam_test_resnet_4/test.py +8 -0
- brainscore_vision/models/scaling_models/__init__.py +265 -0
- brainscore_vision/models/scaling_models/model.py +148 -0
- brainscore_vision/models/scaling_models/model_configs.json +869 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_base_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_large_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_100_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_10_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_1_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_base_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_large_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_100_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_10_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_1_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b0_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b1_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b2_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet101_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet101_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet152_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet18_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet18_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet34_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet34_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_100_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_10_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_1_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/requirements.txt +4 -0
- brainscore_vision/models/scaling_models/test.py +0 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/__init__.py +5 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/model.py +852 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/setup.py +25 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/test.py +0 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/region_layer_map/voneresnet-50-non_stochastic.json +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -2
- brainscore_vision/submission/endpoints.py +3 -4
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/METADATA +2 -2
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/RECORD +143 -18
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/WHEEL +1 -1
- tests/test_model_helpers/temporal/activations/test_inferencer.py +2 -2
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/top_level.txt +0 -0
brainscore_vision/models/vitb14_dinov2_imagenet1k/__init__.py

@@ -0,0 +1,5 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry['vitb14_dinov2_imagenet1k'] = lambda: ModelCommitment(identifier='vitb14_dinov2_imagenet1k', activations_model=get_model('vitb14_dinov2_imagenet1k'), layers=get_layers('vitb14_dinov2_imagenet1k'))
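The added `__init__.py` follows the usual Brain-Score plugin pattern: it registers a lazy constructor under the model identifier, which builds a `ModelCommitment` from the plugin's `get_model`/`get_layers` only when requested. A minimal usage sketch, assuming the top-level `load_model`/`score` helpers of brainscore_vision 2.x; the benchmark identifier below is illustrative and not part of this diff:

```python
# Sketch under stated assumptions: brainscore_vision 2.x exposes
# load_model() and score() at the package top level.
import brainscore_vision

# Resolves 'vitb14_dinov2_imagenet1k' via model_registry and builds the
# ModelCommitment declared in the plugin's __init__.py shown above.
model = brainscore_vision.load_model('vitb14_dinov2_imagenet1k')

# Scoring works through the same registry lookup; the benchmark
# identifier here is only an example, not taken from this release.
result = brainscore_vision.score(model_identifier='vitb14_dinov2_imagenet1k',
                                 benchmark_identifier='MajajHong2015public.IT-pls')
print(result)
```

Because the registry entry is a lambda, merely importing the plugin stays cheap; model weights are only downloaded and committed to layers when the identifier is actually loaded or scored.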