brainscore-vision 2.2.1__py3-none-any.whl → 2.2.2__py3-none-any.whl
- brainscore_vision/model_helpers/brain_transformation/__init__.py +1 -2
- brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/region_layer_map/yudixie_resnet18_random_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_11/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/region_layer_map/yudixie_resnet18_distance_translation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_3/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/region_layer_map/yudixie_resnet18_distance_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_4/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py +13 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/region_layer_map/yudixie_resnet18_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_5/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py +12 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/region_layer_map/yudixie_resnet18_distance_translation_rotation_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_6/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/region_layer_map/yudixie_resnet18_category_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_7/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/region_layer_map/yudixie_resnet18_object_class_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_8/test.py +1 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py +11 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/model.py +60 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/region_layer_map/yudixie_resnet18_cat_obj_class_all_latents_0_240719.json +6 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/setup.py +25 -0
- brainscore_vision/models/yudixie_resnet18_240719_9/test.py +1 -0
- brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json +1 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/METADATA +3 -2
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/RECORD +55 -10
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/WHEEL +1 -1
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/__init__.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/model.py +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/requirements.txt +0 -0
- /brainscore_vision/models/{AdvProp_efficientne_b6 → AdvProp_efficientnet_b6}/test.py +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/LICENSE +0 -0
- {brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/top_level.txt +0 -0
brainscore_vision/model_helpers/brain_transformation/__init__.py

@@ -79,10 +79,9 @@ class ModelCommitment(BrainModel):
                 self._logger.info(f"Successfully loaded region_layer_map for {identifier}")
                 return json.load(region_layer_map_file)
             else:
-                self._logger.info(f"No region_layer_map file found for {identifier}")
+                self._logger.info(f"No region_layer_map file found for {identifier}. Will proceed with default layer mapping")
                 return None
         except Exception as e:
-            self._logger.error(f"Error importing model to search for region_layer_map: {e}")
             return None
 
     def visual_degrees(self) -> int:
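Note on the change above: when a model ships without a region_layer_map file, scoring now logs the fallback and proceeds with the default layer mapping instead of logging an error. The region_layer_map files added in this release (below) are single-line JSON objects pairing each cortical region with the model layer committed to it. A minimal sketch of reading one (the path is illustrative; each model stores its map under its own region_layer_map/ directory):

    import json

    # illustrative path; contents look like {"IT": "layer3", "V4": "layer1", ...}
    with open('region_layer_map/yudixie_resnet18_distance_reg_0_240719.json') as region_layer_map_file:
        region_layer_map = json.load(region_layer_map_file)
    print(region_layer_map['IT'])  # e.g. 'layer3'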
brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json

@@ -0,0 +1 @@
+{"IT": "layer3", "V4": "layer1", "V2": "layer1", "V1": "layer1"}
brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json

@@ -0,0 +1 @@
+{"V4": "layer1", "IT": "layer3", "V2": "layer2", "V1": "layer1"}
brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json

@@ -0,0 +1 @@
+{"V4": "layer2", "IT": "layer3", "V2": "layer2", "V1": "layer2"}
brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py

@@ -0,0 +1,11 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+model_registry['yudixie_resnet18_random_0_240719'] = lambda: commit_model('yudixie_resnet18_random_0_240719')
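Every new model package in this release registers itself the same way: a lazy lambda in model_registry builds the ModelCommitment on first use. A sketch of retrieving the committed model afterwards (assuming brainscore_vision's top-level load_model helper resolves registry entries; hedged, not verified against this release):

    import brainscore_vision

    # resolves the lazy registry entry added above and builds the ModelCommitment
    model = brainscore_vision.load_model('yudixie_resnet18_random_0_240719')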
brainscore_vision/models/yudixie_resnet18_240719_11/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
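The key-renaming loop in get_model above handles checkpoints saved from torch.compile'd models: PyTorch 2.x wraps the network in an OptimizedModule whose state_dict keys carry an '_orig_mod.' prefix (10 characters, hence k[10:]), which must be stripped before loading into an uncompiled resnet18. A hypothetical standalone illustration of the same step:

    import torch
    import torch.nn as nn

    net = nn.Linear(4, 2)
    compiled = torch.compile(net)
    saved = compiled.state_dict()  # keys like '_orig_mod.weight'
    plain = {k[len('_orig_mod.'):] if k.startswith('_orig_mod.') else k: v
             for k, v in saved.items()}
    nn.Linear(4, 2).load_state_dict(plain, strict=True)  # loads cleanly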
brainscore_vision/models/yudixie_resnet18_240719_11/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_11/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json

@@ -0,0 +1 @@
+{"V4": "layer1", "IT": "layer3", "V2": "layer1", "V1": "layer1"}
brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py

@@ -0,0 +1,11 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+model_registry['yudixie_resnet18_distance_translation_0_240719'] = lambda: commit_model('yudixie_resnet18_distance_translation_0_240719')
brainscore_vision/models/yudixie_resnet18_240719_3/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_3/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_3/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py

@@ -0,0 +1,12 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+
+model_registry['yudixie_resnet18_distance_rotation_0_240719'] = lambda: commit_model('yudixie_resnet18_distance_rotation_0_240719')
brainscore_vision/models/yudixie_resnet18_240719_4/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_4/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_4/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py

@@ -0,0 +1,13 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+
+model_registry['yudixie_resnet18_translation_rotation_0_240719'] = lambda: commit_model('yudixie_resnet18_translation_rotation_0_240719')
+
brainscore_vision/models/yudixie_resnet18_240719_5/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_5/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_5/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py

@@ -0,0 +1,12 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+model_registry['yudixie_resnet18_distance_translation_rotation_0_240719'] = lambda: commit_model('yudixie_resnet18_distance_translation_rotation_0_240719')
+
brainscore_vision/models/yudixie_resnet18_240719_6/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_6/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_6/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py

@@ -0,0 +1,11 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+model_registry['yudixie_resnet18_category_class_0_240719'] = lambda: commit_model('yudixie_resnet18_category_class_0_240719')
brainscore_vision/models/yudixie_resnet18_240719_7/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_7/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_7/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py

@@ -0,0 +1,11 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+model_registry['yudixie_resnet18_object_class_0_240719'] = lambda: commit_model('yudixie_resnet18_object_class_0_240719')
brainscore_vision/models/yudixie_resnet18_240719_8/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_8/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_8/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py

@@ -0,0 +1,11 @@
+from brainscore_vision import model_registry
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from .model import get_model, get_layers
+
+
+def commit_model(identifier):
+    return ModelCommitment(identifier=identifier,
+                           activations_model=get_model(identifier),
+                           layers=get_layers(identifier))
+
+model_registry['yudixie_resnet18_cat_obj_class_all_latents_0_240719'] = lambda: commit_model('yudixie_resnet18_cat_obj_class_all_latents_0_240719')
brainscore_vision/models/yudixie_resnet18_240719_9/model.py

@@ -0,0 +1,60 @@
+import os
+from pathlib import Path
+import functools
+from urllib.request import urlretrieve
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision.models import resnet18
+
+from brainscore_vision.model_helpers.check_submission import check_models
+from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images
+
+
+# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+# If the model requires a GPU, contact the brain-score team directly.
+
+
+def get_model(name):
+    pytorch_device = torch.device('cpu')
+
+    weigth_url = f'https://yudi-brainscore-models.s3.amazonaws.com/{name}.pth'
+    fh = urlretrieve(weigth_url, f'{name}_weights.pth')
+    load_path = fh[0]
+
+    pytorch_model = resnet18()
+    pytorch_model.fc = nn.Linear(pytorch_model.fc.in_features, 674)
+    pytorch_model = pytorch_model.to(pytorch_device)
+
+    # load model from saved weights
+    saved_state_dict = torch.load(load_path, map_location=pytorch_device)
+    state_dict = {}
+    for k, v in saved_state_dict.items():
+        if k.startswith('_orig_mod.'):
+            # for compiled models
+            state_dict[k[10:]] = v
+        else:
+            state_dict[k] = v
+    pytorch_model.load_state_dict(state_dict, strict=True)
+    print(f'Loaded model from {load_path}')
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=224)
+    wrapper = PytorchWrapper(identifier=name,
+                             model=pytorch_model,
+                             preprocessing=preprocessing)
+    wrapper.image_size = 224
+    return wrapper
+
+
+def get_layers(name):
+    return ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
+
+
+def get_bibtex(model_identifier):
+    return """xx"""
+
+
+if __name__ == '__main__':
+    check_models.check_base_models(__name__)
brainscore_vision/models/yudixie_resnet18_240719_9/setup.py

@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+requirements = ["torchvision",
+                "torch"
+]
+
+setup(
+    packages=find_packages(exclude=['tests']),
+    include_package_data=True,
+    install_requires=requirements,
+    license="MIT license",
+    zip_safe=False,
+    keywords='brain-score template',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.7',
+    ],
+    test_suite='tests',
+)
brainscore_vision/models/yudixie_resnet18_240719_9/test.py

@@ -0,0 +1 @@
+# Left empty as part of 2023 models migration
brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json

@@ -0,0 +1 @@
+{"V4": "layer2", "IT": "layer3", "V2": "layer2", "V1": "layer2"}
{brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: brainscore_vision
-Version: 2.2.1
+Version: 2.2.2
 Summary: The Brain-Score library enables model comparisons to behavioral and neural experiments
 License:
         MIT License
@@ -37,12 +37,13 @@ Requires-Dist: eva-decord
 Requires-Dist: psutil
 Provides-Extra: test
 Requires-Dist: pytest; extra == "test"
-Requires-Dist:
+Requires-Dist: pytest_check; extra == "test"
 Requires-Dist: pytest-mock; extra == "test"
 Requires-Dist: pytest-timeout; extra == "test"
 Requires-Dist: torch; extra == "test"
 Requires-Dist: torchvision; extra == "test"
 Requires-Dist: matplotlib; extra == "test"
+Requires-Dist: pytest-mock; extra == "test"
 
 [![Build Status](https://app.travis-ci.com/brain-score/vision)
 [![Documentation Status](https://brain-score.readthedocs.io/en/latest/?badge=latest)
{brainscore_vision-2.2.1.dist-info → brainscore_vision-2.2.2.dist-info}/RECORD

@@ -407,7 +407,7 @@ brainscore_vision/model_helpers/activations/temporal/inputs/video.py,sha256=9XDR
 brainscore_vision/model_helpers/activations/temporal/model/__init__.py,sha256=xVvxYuSDMMSG6KA9IWFpN6SKvLhA--tQp-R3ogm-SJo,71
 brainscore_vision/model_helpers/activations/temporal/model/base.py,sha256=2i9ZEFnTqovWAlGDoINuLoltbnMxbJIMm1OTpg9dK1k,1305
 brainscore_vision/model_helpers/activations/temporal/model/pytorch.py,sha256=G95gDfHjiNTmDYelpFYiQ36EuH2u8ePGOLRE_fpwIkI,3769
-brainscore_vision/model_helpers/brain_transformation/__init__.py,sha256=
+brainscore_vision/model_helpers/brain_transformation/__init__.py,sha256=RXvH_bha048bHeBOM5ndy70zjawi2Nh8rA2MTDPJm-o,6562
 brainscore_vision/model_helpers/brain_transformation/behavior.py,sha256=3Ov2GO1_euWSJLYucm6A-Y6jWTIaJUJ4Snk22uk8Lro,16255
 brainscore_vision/model_helpers/brain_transformation/imagenet_classes.txt,sha256=OF4CQEmUJsAit3O-GuTaeA8Fe4nJaUx_RxmfYnRrbt0,9999
 brainscore_vision/model_helpers/brain_transformation/neural.py,sha256=PN-1BlAl_4LvTWzTSJycBgFO1IZjzdaMgzwO9ZJfisg,7998
@@ -437,10 +437,10 @@ brainscore_vision/model_helpers/check_submission/images/9.png,sha256=G-2MmcsNn6m
 brainscore_vision/model_helpers/utils/__init__.py,sha256=9ugXlyO3EN7LDVDvrKGPyDy2B5-BoNgGe84LNlsDHg8,354
 brainscore_vision/model_helpers/utils/s3.py,sha256=gtvjq2tdITTQEl3PUXst2YV0pQFnbuBrXMJ2UgEOs_g,1618
 brainscore_vision/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-brainscore_vision/models/
-brainscore_vision/models/
-brainscore_vision/models/
-brainscore_vision/models/
+brainscore_vision/models/AdvProp_efficientnet_b6/__init__.py,sha256=JIEjGKi_uj07SJEXEsco9wheoAMlcFcNtqWu1mkcH2E,376
+brainscore_vision/models/AdvProp_efficientnet_b6/model.py,sha256=FmfG73MK6n6vu_VCaDvZVUu-1aV7OdQBFf7RuAYxvw0,2978
+brainscore_vision/models/AdvProp_efficientnet_b6/requirements.txt,sha256=RT5ZFF_hCWZpZUlpRYDea9rhrdCLMh-ROARiY_tfR-8,20
+brainscore_vision/models/AdvProp_efficientnet_b6/test.py,sha256=ZA15svTwS5KYFHdPTNrMULsRztjWRnkDAyH8UYJjQBI,263
 brainscore_vision/models/AlexNet_SIN/__init__.py,sha256=r7vZRfMSJ_RCbhPYd-l1FtYn3jw2v6M93L_YuX1j4aM,313
 brainscore_vision/models/AlexNet_SIN/model.py,sha256=ZEdPuDm1cXYSs5nMtTV11teAMRb6BN7uedQKfBbF0EY,1443
 brainscore_vision/models/AlexNet_SIN/requirements.txt,sha256=orJOreWHAlirw2pXheiMRhuyBMPgFXzPommauloIHQA,18
@@ -1046,18 +1046,62 @@ brainscore_vision/models/yudixie_resnet18_240719_0/__init__.py,sha256=ucSQ7c-LE2
 brainscore_vision/models/yudixie_resnet18_240719_0/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
 brainscore_vision/models/yudixie_resnet18_240719_0/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
 brainscore_vision/models/yudixie_resnet18_240719_0/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_0/region_layer_map/yudixie_resnet18_distance_reg_0_240719.json,sha256=4f0eENxP5wzFPQMcdwCdzjFRN1-nekSVwVUDJ1bTarg,64
 brainscore_vision/models/yudixie_resnet18_240719_1/__init__.py,sha256=QaO6kQNw2jqHR2Ka8WbOyNOALp3rYn5SsXK4lSxeVyA,504
 brainscore_vision/models/yudixie_resnet18_240719_1/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
 brainscore_vision/models/yudixie_resnet18_240719_1/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
 brainscore_vision/models/yudixie_resnet18_240719_1/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_1/region_layer_map/yudixie_resnet18_translation_reg_0_240719.json,sha256=9K6PiKPgJ6rGPWRXpFZKe2ifx-FhtHkRDl8a-_VCnNc,64
 brainscore_vision/models/yudixie_resnet18_240719_10/__init__.py,sha256=U3xZqszhlvCUQLFckHStyb7nHbhEof3FyCyA0nZFcFw,502
 brainscore_vision/models/yudixie_resnet18_240719_10/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
 brainscore_vision/models/yudixie_resnet18_240719_10/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
 brainscore_vision/models/yudixie_resnet18_240719_10/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_10/region_layer_map/yudixie_resnet18_imagenet1kpret_0_240719.json,sha256=FnxU11hsVITQ0tvh0R2Nmbmw987RSRV2iDYG-WiMYt8,64
+brainscore_vision/models/yudixie_resnet18_240719_11/__init__.py,sha256=vrIDmLfXs-WP9qpflTGxXQ8iKv-QQW8I9By053wD3uA,486
+brainscore_vision/models/yudixie_resnet18_240719_11/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_11/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_11/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_11/region_layer_map/yudixie_resnet18_random_0_240719.json,sha256=y_ItnkhB5VP1SuotW1Z0RRdUk373yKOVbz_tnAfeqOw,82
 brainscore_vision/models/yudixie_resnet18_240719_2/__init__.py,sha256=pMy6X81PSeZkca5CrK8i_YuXsjFtJdEJpeU6yLxAEXg,498
 brainscore_vision/models/yudixie_resnet18_240719_2/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
 brainscore_vision/models/yudixie_resnet18_240719_2/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
 brainscore_vision/models/yudixie_resnet18_240719_2/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_2/region_layer_map/yudixie_resnet18_rotation_reg_0_240719.json,sha256=ZmVEt_bJoUZmPLreQFQ1rX3yl-WQNu6Vij0GbHW42WQ,64
+brainscore_vision/models/yudixie_resnet18_240719_3/__init__.py,sha256=5BFOHPa41U-F_OwRJpFBvOSqDm6M0KX0ZXctxzjsCpw,514
+brainscore_vision/models/yudixie_resnet18_240719_3/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_3/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_3/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_3/region_layer_map/yudixie_resnet18_distance_translation_0_240719.json,sha256=nAcMQrwQH7BNdERBWJUq9wUEn7VzyVaJpZkbWqLlP-c,82
+brainscore_vision/models/yudixie_resnet18_240719_4/__init__.py,sha256=zaMeUpOgh0Yud11Tr0bBQiY6Vfk1F3ZHEVZ8z7yTqQU,509
+brainscore_vision/models/yudixie_resnet18_240719_4/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_4/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_4/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_4/region_layer_map/yudixie_resnet18_distance_rotation_0_240719.json,sha256=j-JN-lskvI4MkSHD0x0ao8NRsVGpHHJrsb67_DBC9h8,82
+brainscore_vision/models/yudixie_resnet18_240719_5/__init__.py,sha256=yhzJOXB_ImSM19_t-5Jqw9RqgT1EEFZrqFJ-sRciZy8,516
+brainscore_vision/models/yudixie_resnet18_240719_5/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_5/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_5/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_5/region_layer_map/yudixie_resnet18_translation_rotation_0_240719.json,sha256=nAcMQrwQH7BNdERBWJUq9wUEn7VzyVaJpZkbWqLlP-c,82
+brainscore_vision/models/yudixie_resnet18_240719_6/__init__.py,sha256=xdbXNVl0dTOH8GSPXFyqEUO8UgwQ_n4D-sqx9-gI1ps,533
+brainscore_vision/models/yudixie_resnet18_240719_6/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_6/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_6/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_6/region_layer_map/yudixie_resnet18_distance_translation_rotation_0_240719.json,sha256=nAcMQrwQH7BNdERBWJUq9wUEn7VzyVaJpZkbWqLlP-c,82
+brainscore_vision/models/yudixie_resnet18_240719_7/__init__.py,sha256=2LMihNQJ1wh3lAZVb7OLcNJEl9wazWwXx0O598fbjP0,502
+brainscore_vision/models/yudixie_resnet18_240719_7/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_7/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_7/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_7/region_layer_map/yudixie_resnet18_category_class_0_240719.json,sha256=rq77HTTMiw_5dqvIqv_kMTBZ4NAFEpQTpCZXpCm7XCY,82
+brainscore_vision/models/yudixie_resnet18_240719_8/__init__.py,sha256=tEE3QUnAsVy8H1NuMU5L0hIUn15c8RnKj9uMLzf7CUM,498
+brainscore_vision/models/yudixie_resnet18_240719_8/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_8/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_8/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_8/region_layer_map/yudixie_resnet18_object_class_0_240719.json,sha256=_hpeYTtc_PrlqFc4ODfNrVYStHYUp-0O3DfsdmbIbR8,82
+brainscore_vision/models/yudixie_resnet18_240719_9/__init__.py,sha256=IH1lsHyJwoVLOtkjxa2FKLYqF78sASOkPOmMSrn1odY,524
+brainscore_vision/models/yudixie_resnet18_240719_9/model.py,sha256=Ey3vEedDVaCkIblXmNuUtVJiwfdMY16PZA0qP78ZNQo,1930
+brainscore_vision/models/yudixie_resnet18_240719_9/setup.py,sha256=LwzJsD4_YtQ1OL8LTZbjvUISbVgmglGPwyJgbc4guy8,635
+brainscore_vision/models/yudixie_resnet18_240719_9/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet18_240719_9/region_layer_map/yudixie_resnet18_cat_obj_class_all_latents_0_240719.json,sha256=sp9vBSxPPsOmKz1Es9QcQhQmcwD_QoCmYNbDpqcVL3A,82
 brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/__init__.py,sha256=sXf28YXiqak4ne7kMHn0kUU6_F8F4ufbIb3P0FhZyHY,615
 brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/model.py,sha256=7CNunLY4q5E1cuB-Trr4xilBpCym10e4T1YqsUBy5-Q,2239
 brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py,sha256=99yTZYi6gEbBOwFkvEZXUICmJuPYg6JzfWmkmyKB3qs,586
@@ -1066,6 +1110,7 @@ brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/__init__.py,sh
 brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/model.py,sha256=LTVmYqMxNiko8WcdM5AQdfS4sko6YlMTrQ5yvTEwQ1I,2195
 brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py,sha256=99yTZYi6gEbBOwFkvEZXUICmJuPYg6JzfWmkmyKB3qs,586
 brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/test.py,sha256=RYP7tU-soYBR0i2lMGqQ2elFt9TfqJjo1XVgOGoDufs,46
+brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/region_layer_map/yudixie_resnet50_imagenet1kpret_0_240312.json,sha256=FnxU11hsVITQ0tvh0R2Nmbmw987RSRV2iDYG-WiMYt8,64
 brainscore_vision/submission/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 brainscore_vision/submission/actions_helpers.py,sha256=_kcVGTjKru6GBlxdUVwAIUlJjBS45Y85zBaieoLSZPo,5535
 brainscore_vision/submission/config.py,sha256=wpvds5MzNbiVGFaf8fSNEpoB-1jCBEUiu5ZFSusOXW8,200
@@ -1127,8 +1172,8 @@ tests/test_submission/mock_config.py,sha256=BCNn1w-voDsLY9snKhZ0CAtmCkImqEJM449O
 tests/test_submission/test_actions_helpers.py,sha256=QScwm-V8Pcc6SVShhTGg_ClZ_-VThLKDUZYD0bbUHu8,2941
 tests/test_submission/test_db.py,sha256=iRdLCENuEYE_oRVU4EEEGfHdH3OvjMIl5C8ga9e6lEw,1943
 tests/test_submission/test_endpoints.py,sha256=x1eRP3UAxR6esvnKYCb7cpPPUDMJ4jZepjDBP-4_8pw,5919
-brainscore_vision-2.2.
-brainscore_vision-2.2.
-brainscore_vision-2.2.
-brainscore_vision-2.2.
-brainscore_vision-2.2.
+brainscore_vision-2.2.2.dist-info/LICENSE,sha256=XTtffTkAM5Ng19jReCLLwCI48jADRuHeGAZGMn-M4hY,1096
+brainscore_vision-2.2.2.dist-info/METADATA,sha256=P7WMEcUsy2MvzmCjLNwPw5zq1B5OyXx8JpBXcX1evhA,7777
+brainscore_vision-2.2.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+brainscore_vision-2.2.2.dist-info/top_level.txt,sha256=KvGBBL9JaITeDGNsicf1l8iH5-dao2RWQdo369jlrBk,40
+brainscore_vision-2.2.2.dist-info/RECORD,,