brainscore-vision 2.1__py3-none-any.whl → 2.2.1__py3-none-any.whl
- brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +2 -1
- brainscore_vision/benchmarks/coggan2024_behavior/test.py +2 -2
- brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +4 -4
- brainscore_vision/benchmarks/coggan2024_fMRI/test.py +2 -2
- brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50000 -50000
- brainscore_vision/benchmarks/imagenet_c/benchmark.py +1 -1
- brainscore_vision/benchmarks/lonnqvist2024/__init__.py +8 -0
- brainscore_vision/benchmarks/lonnqvist2024/benchmark.py +125 -0
- brainscore_vision/benchmarks/lonnqvist2024/test.py +61 -0
- brainscore_vision/benchmarks/malania2007/benchmark.py +3 -0
- brainscore_vision/benchmarks/maniquet2024/benchmark.py +1 -1
- brainscore_vision/data/lonnqvist2024/__init__.py +47 -0
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +53 -0
- brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +61 -0
- brainscore_vision/data/lonnqvist2024/test.py +127 -0
- brainscore_vision/model_helpers/brain_transformation/__init__.py +33 -0
- brainscore_vision/models/alexnet/region_layer_map/alexnet.json +1 -0
- brainscore_vision/models/alexnet_7be5be79/setup.py +4 -4
- brainscore_vision/models/alexnet_random/__init__.py +7 -0
- brainscore_vision/models/alexnet_random/model.py +46 -0
- brainscore_vision/models/alexnet_random/setup.py +26 -0
- brainscore_vision/models/alexnet_random/test.py +1 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_13_224_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/region_layer_map/cvt_cvt-13-384-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_13_384_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-13-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/region_layer_map/cvt_cvt-21-224-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_21_224_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/region_layer_map/cvt_cvt-21-384-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_21_384_in1k_4/test.py +8 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/__init__.py +9 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/model.py +142 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-21-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
- brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/test.py +8 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py +7 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py +57 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt +5 -0
- brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py +7 -0
- brainscore_vision/models/inception_v4_pytorch/__init__.py +7 -0
- brainscore_vision/models/inception_v4_pytorch/model.py +64 -0
- brainscore_vision/models/inception_v4_pytorch/requirements.txt +3 -0
- brainscore_vision/models/inception_v4_pytorch/test.py +8 -0
- brainscore_vision/models/mvimgnet_ms_05/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ms_05/model.py +64 -0
- brainscore_vision/models/mvimgnet_ms_05/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ms_05/test.py +1 -0
- brainscore_vision/models/mvimgnet_rf/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_rf/model.py +64 -0
- brainscore_vision/models/mvimgnet_rf/setup.py +25 -0
- brainscore_vision/models/mvimgnet_rf/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_00/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_00/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_00/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_00/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_02/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_02/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_02/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_02/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_03/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_03/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_03/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_03/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_04/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_04/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_04/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_04/test.py +1 -0
- brainscore_vision/models/mvimgnet_ss_05/__init__.py +9 -0
- brainscore_vision/models/mvimgnet_ss_05/model.py +64 -0
- brainscore_vision/models/mvimgnet_ss_05/setup.py +25 -0
- brainscore_vision/models/mvimgnet_ss_05/test.py +1 -0
- brainscore_vision/models/resnet50_tutorial/region_layer_map/resnet50_tutorial.json +1 -0
- brainscore_vision/models/sam_test_resnet/__init__.py +5 -0
- brainscore_vision/models/sam_test_resnet/model.py +26 -0
- brainscore_vision/models/sam_test_resnet/requirements.txt +2 -0
- brainscore_vision/models/sam_test_resnet/test.py +8 -0
- brainscore_vision/models/sam_test_resnet_4/__init__.py +5 -0
- brainscore_vision/models/sam_test_resnet_4/model.py +26 -0
- brainscore_vision/models/sam_test_resnet_4/requirements.txt +2 -0
- brainscore_vision/models/sam_test_resnet_4/test.py +8 -0
- brainscore_vision/models/scaling_models/__init__.py +265 -0
- brainscore_vision/models/scaling_models/model.py +148 -0
- brainscore_vision/models/scaling_models/model_configs.json +869 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_base_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_large_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_100_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_10_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_1_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_base_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_large_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_100_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_10_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_1_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_full_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b0_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b1_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b2_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet101_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet101_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet152_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet18_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet18_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet34_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet34_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_ecoset_full.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_100_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_10_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_1_seed-0.json +6 -0
- brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_full.json +6 -0
- brainscore_vision/models/scaling_models/requirements.txt +4 -0
- brainscore_vision/models/scaling_models/test.py +0 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/__init__.py +5 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/model.py +852 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/setup.py +25 -0
- brainscore_vision/models/vitb14_dinov2_imagenet1k/test.py +0 -0
- brainscore_vision/models/voneresnet_50_non_stochastic/region_layer_map/voneresnet-50-non_stochastic.json +1 -0
- brainscore_vision/submission/actions_helpers.py +2 -2
- brainscore_vision/submission/endpoints.py +3 -4
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/METADATA +2 -2
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/RECORD +143 -18
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/WHEEL +1 -1
- tests/test_model_helpers/temporal/activations/test_inferencer.py +2 -2
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/top_level.txt +0 -0
brainscore_vision/models/mvimgnet_ms_05/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ms_05"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ms_05",
+    activations_model=get_model("mvimgnet_ms_05"),
+    layers=get_layers("mvimgnet_ms_05"),
+)
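The registration above is evaluated lazily. As a minimal usage sketch (not part of this diff, assuming the standard brainscore_vision loader API; the benchmark identifier is only an illustrative example):

    from brainscore_vision import load_model, load_benchmark

    model = load_model("mvimgnet_ms_05")  # invokes the lambda registered above
    benchmark = load_benchmark("MajajHong2015public.IT-pls")  # example benchmark
    score = benchmark(model)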
brainscore_vision/models/mvimgnet_ms_05/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_ms_05"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_ms_05"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/multiscale/512_10/lmda_0.5/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_ms_05"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
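The key-remapping loop in load_composer_classifier strips Composer's "model.f." prefix from backbone weights and maps the "lin_cls" head onto torchvision's "fc" layer. A self-contained sketch, using hypothetical Composer-style keys purely for illustration, shows the transformation:

    from collections import OrderedDict

    # Hypothetical Composer checkpoint keys (placeholders, not real tensors)
    sd = {
        "model.f.conv1.weight": "backbone tensor",
        "model.f.layer1.0.conv1.weight": "backbone tensor",
        "model.lin_cls.weight": "head tensor",
        "model.lin_cls.bias": "head tensor",
    }
    new_sd = OrderedDict()
    for k, v in sd.items():
        if "lin_cls" in k:
            new_sd["fc." + k.split(".")[-1]] = v  # classifier head -> fc.*
        if ".f." not in k:
            continue  # skip keys outside the "model.f." backbone
        parts = k.split(".")
        new_sd[".".join(parts[parts.index("f") + 1:])] = v  # drop "model.f." prefix
    print(list(new_sd))
    # ['conv1.weight', 'layer1.0.conv1.weight', 'fc.weight', 'fc.bias']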
brainscore_vision/models/mvimgnet_ms_05/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/mvimgnet_ms_05/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/mvimgnet_rf/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_rf"] = lambda: ModelCommitment(
+    identifier="mvimgnet_rf",
+    activations_model=get_model("mvimgnet_rf"),
+    layers=get_layers("mvimgnet_rf"),
+)
brainscore_vision/models/mvimgnet_rf/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_rf"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_rf"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/rf/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_rf"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
brainscore_vision/models/mvimgnet_rf/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/mvimgnet_rf/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/mvimgnet_ss_00/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ss_00"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ss_00",
+    activations_model=get_model("mvimgnet_ss_00"),
+    layers=get_layers("mvimgnet_ss_00"),
+)
brainscore_vision/models/mvimgnet_ss_00/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_ss_00"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_ss_00"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.0/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_ss_00"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
brainscore_vision/models/mvimgnet_ss_00/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/mvimgnet_ss_00/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/mvimgnet_ss_02/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ss_02"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ss_02",
+    activations_model=get_model("mvimgnet_ss_02"),
+    layers=get_layers("mvimgnet_ss_02"),
+)
brainscore_vision/models/mvimgnet_ss_02/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_ss_02"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_ss_02"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.2/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_ss_02"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
brainscore_vision/models/mvimgnet_ss_02/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/mvimgnet_ss_02/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/mvimgnet_ss_03/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ss_03"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ss_03",
+    activations_model=get_model("mvimgnet_ss_03"),
+    layers=get_layers("mvimgnet_ss_03"),
+)
brainscore_vision/models/mvimgnet_ss_03/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_ss_03"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_ss_03"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.3/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_ss_03"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
brainscore_vision/models/mvimgnet_ss_03/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/mvimgnet_ss_03/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/mvimgnet_ss_04/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ss_04"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ss_04",
+    activations_model=get_model("mvimgnet_ss_04"),
+    layers=get_layers("mvimgnet_ss_04"),
+)
brainscore_vision/models/mvimgnet_ss_04/model.py
@@ -0,0 +1,64 @@
+from brainscore_vision.model_helpers.check_submission import check_models
+import functools
+import os
+from urllib.request import urlretrieve
+import torchvision.models
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+from pathlib import Path
+from brainscore_vision.model_helpers import download_weights
+import torch
+from collections import OrderedDict
+
+# This is an example implementation for submitting resnet-50 as a pytorch model
+
+# Attention: It is important, that the wrapper identifier is unique per model!
+# The results will otherwise be the same due to brain-scores internal result caching mechanism.
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+from brainscore_vision.model_helpers.check_submission import check_models
+
+
+def get_model_list():
+    return ["mvimgnet_ss_04"]
+
+
+def get_model(name):
+    assert name == "mvimgnet_ss_04"
+    url = "https://users.flatironinstitute.org/~tyerxa/slow_steady/training_checkpoints/slow_steady/r2/LARS/lmda_0.4/latest-rank0.pt"
+    fh = urlretrieve(url)
+    state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"]
+    model = load_composer_classifier(state_dict)
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+def load_composer_classifier(sd):
+    model = torchvision.models.resnet.resnet50()
+    new_sd = OrderedDict()
+    for k, v in sd.items():
+        if 'lin_cls' in k:
+            new_sd['fc.' + k.split('.')[-1]] = v
+        if ".f." not in k:
+            continue
+        parts = k.split(".")
+        idx = parts.index("f")
+        new_k = ".".join(parts[idx + 1 :])
+        new_sd[new_k] = v
+    model.load_state_dict(new_sd, strict=True)
+    return model
+
+def get_layers(name):
+    assert name == "mvimgnet_ss_04"
+
+    outs = ["layer1", "layer2", "layer3", "layer4"]
+    return outs
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == "__main__":
+    check_models.check_base_models(__name__)
brainscore_vision/models/mvimgnet_ss_04/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = [ "torchvision",
+                 "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/mvimgnet_ss_04/test.py
@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/mvimgnet_ss_05/__init__.py
@@ -0,0 +1,9 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+model_registry["mvimgnet_ss_05"] = lambda: ModelCommitment(
+    identifier="mvimgnet_ss_05",
+    activations_model=get_model("mvimgnet_ss_05"),
+    layers=get_layers("mvimgnet_ss_05"),
+)