brainscore-vision 2.1__py3-none-any.whl → 2.2.1__py3-none-any.whl

Files changed (143)
  1. brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +2 -1
  2. brainscore_vision/benchmarks/coggan2024_behavior/test.py +2 -2
  3. brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +4 -4
  4. brainscore_vision/benchmarks/coggan2024_fMRI/test.py +2 -2
  5. brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50000 -50000
  6. brainscore_vision/benchmarks/imagenet_c/benchmark.py +1 -1
  7. brainscore_vision/benchmarks/lonnqvist2024/__init__.py +8 -0
  8. brainscore_vision/benchmarks/lonnqvist2024/benchmark.py +125 -0
  9. brainscore_vision/benchmarks/lonnqvist2024/test.py +61 -0
  10. brainscore_vision/benchmarks/malania2007/benchmark.py +3 -0
  11. brainscore_vision/benchmarks/maniquet2024/benchmark.py +1 -1
  12. brainscore_vision/data/lonnqvist2024/__init__.py +47 -0
  13. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +53 -0
  14. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +61 -0
  15. brainscore_vision/data/lonnqvist2024/test.py +127 -0
  16. brainscore_vision/model_helpers/brain_transformation/__init__.py +33 -0
  17. brainscore_vision/models/alexnet/region_layer_map/alexnet.json +1 -0
  18. brainscore_vision/models/alexnet_7be5be79/setup.py +4 -4
  19. brainscore_vision/models/alexnet_random/__init__.py +7 -0
  20. brainscore_vision/models/alexnet_random/model.py +46 -0
  21. brainscore_vision/models/alexnet_random/setup.py +26 -0
  22. brainscore_vision/models/alexnet_random/test.py +1 -0
  23. brainscore_vision/models/cvt_cvt_13_224_in1k_4/__init__.py +9 -0
  24. brainscore_vision/models/cvt_cvt_13_224_in1k_4/model.py +142 -0
  25. brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4.json +6 -0
  26. brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4_LucyV4.json +6 -0
  27. brainscore_vision/models/cvt_cvt_13_224_in1k_4/requirements.txt +4 -0
  28. brainscore_vision/models/cvt_cvt_13_224_in1k_4/test.py +8 -0
  29. brainscore_vision/models/cvt_cvt_13_384_in1k_4/__init__.py +9 -0
  30. brainscore_vision/models/cvt_cvt_13_384_in1k_4/model.py +142 -0
  31. brainscore_vision/models/cvt_cvt_13_384_in1k_4/region_layer_map/cvt_cvt-13-384-in1k_4_LucyV4.json +6 -0
  32. brainscore_vision/models/cvt_cvt_13_384_in1k_4/requirements.txt +4 -0
  33. brainscore_vision/models/cvt_cvt_13_384_in1k_4/test.py +8 -0
  34. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/__init__.py +9 -0
  35. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/model.py +142 -0
  36. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-13-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
  37. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
  38. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/test.py +8 -0
  39. brainscore_vision/models/cvt_cvt_21_224_in1k_4/__init__.py +9 -0
  40. brainscore_vision/models/cvt_cvt_21_224_in1k_4/model.py +142 -0
  41. brainscore_vision/models/cvt_cvt_21_224_in1k_4/region_layer_map/cvt_cvt-21-224-in1k_4_LucyV4.json +6 -0
  42. brainscore_vision/models/cvt_cvt_21_224_in1k_4/requirements.txt +4 -0
  43. brainscore_vision/models/cvt_cvt_21_224_in1k_4/test.py +8 -0
  44. brainscore_vision/models/cvt_cvt_21_384_in1k_4/__init__.py +9 -0
  45. brainscore_vision/models/cvt_cvt_21_384_in1k_4/model.py +142 -0
  46. brainscore_vision/models/cvt_cvt_21_384_in1k_4/region_layer_map/cvt_cvt-21-384-in1k_4_LucyV4.json +6 -0
  47. brainscore_vision/models/cvt_cvt_21_384_in1k_4/requirements.txt +4 -0
  48. brainscore_vision/models/cvt_cvt_21_384_in1k_4/test.py +8 -0
  49. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/__init__.py +9 -0
  50. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/model.py +142 -0
  51. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-21-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
  52. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
  53. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/test.py +8 -0
  54. brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py +7 -0
  55. brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py +57 -0
  56. brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt +5 -0
  57. brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py +7 -0
  58. brainscore_vision/models/inception_v4_pytorch/__init__.py +7 -0
  59. brainscore_vision/models/inception_v4_pytorch/model.py +64 -0
  60. brainscore_vision/models/inception_v4_pytorch/requirements.txt +3 -0
  61. brainscore_vision/models/inception_v4_pytorch/test.py +8 -0
  62. brainscore_vision/models/mvimgnet_ms_05/__init__.py +9 -0
  63. brainscore_vision/models/mvimgnet_ms_05/model.py +64 -0
  64. brainscore_vision/models/mvimgnet_ms_05/setup.py +25 -0
  65. brainscore_vision/models/mvimgnet_ms_05/test.py +1 -0
  66. brainscore_vision/models/mvimgnet_rf/__init__.py +9 -0
  67. brainscore_vision/models/mvimgnet_rf/model.py +64 -0
  68. brainscore_vision/models/mvimgnet_rf/setup.py +25 -0
  69. brainscore_vision/models/mvimgnet_rf/test.py +1 -0
  70. brainscore_vision/models/mvimgnet_ss_00/__init__.py +9 -0
  71. brainscore_vision/models/mvimgnet_ss_00/model.py +64 -0
  72. brainscore_vision/models/mvimgnet_ss_00/setup.py +25 -0
  73. brainscore_vision/models/mvimgnet_ss_00/test.py +1 -0
  74. brainscore_vision/models/mvimgnet_ss_02/__init__.py +9 -0
  75. brainscore_vision/models/mvimgnet_ss_02/model.py +64 -0
  76. brainscore_vision/models/mvimgnet_ss_02/setup.py +25 -0
  77. brainscore_vision/models/mvimgnet_ss_02/test.py +1 -0
  78. brainscore_vision/models/mvimgnet_ss_03/__init__.py +9 -0
  79. brainscore_vision/models/mvimgnet_ss_03/model.py +64 -0
  80. brainscore_vision/models/mvimgnet_ss_03/setup.py +25 -0
  81. brainscore_vision/models/mvimgnet_ss_03/test.py +1 -0
  82. brainscore_vision/models/mvimgnet_ss_04/__init__.py +9 -0
  83. brainscore_vision/models/mvimgnet_ss_04/model.py +64 -0
  84. brainscore_vision/models/mvimgnet_ss_04/setup.py +25 -0
  85. brainscore_vision/models/mvimgnet_ss_04/test.py +1 -0
  86. brainscore_vision/models/mvimgnet_ss_05/__init__.py +9 -0
  87. brainscore_vision/models/mvimgnet_ss_05/model.py +64 -0
  88. brainscore_vision/models/mvimgnet_ss_05/setup.py +25 -0
  89. brainscore_vision/models/mvimgnet_ss_05/test.py +1 -0
  90. brainscore_vision/models/resnet50_tutorial/region_layer_map/resnet50_tutorial.json +1 -0
  91. brainscore_vision/models/sam_test_resnet/__init__.py +5 -0
  92. brainscore_vision/models/sam_test_resnet/model.py +26 -0
  93. brainscore_vision/models/sam_test_resnet/requirements.txt +2 -0
  94. brainscore_vision/models/sam_test_resnet/test.py +8 -0
  95. brainscore_vision/models/sam_test_resnet_4/__init__.py +5 -0
  96. brainscore_vision/models/sam_test_resnet_4/model.py +26 -0
  97. brainscore_vision/models/sam_test_resnet_4/requirements.txt +2 -0
  98. brainscore_vision/models/sam_test_resnet_4/test.py +8 -0
  99. brainscore_vision/models/scaling_models/__init__.py +265 -0
  100. brainscore_vision/models/scaling_models/model.py +148 -0
  101. brainscore_vision/models/scaling_models/model_configs.json +869 -0
  102. brainscore_vision/models/scaling_models/region_layer_map/convnext_base_imagenet_full_seed-0.json +6 -0
  103. brainscore_vision/models/scaling_models/region_layer_map/convnext_large_imagenet_full_seed-0.json +6 -0
  104. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_100_seed-0.json +6 -0
  105. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_10_seed-0.json +6 -0
  106. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_1_seed-0.json +6 -0
  107. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_full_seed-0.json +6 -0
  108. brainscore_vision/models/scaling_models/region_layer_map/deit_base_imagenet_full_seed-0.json +6 -0
  109. brainscore_vision/models/scaling_models/region_layer_map/deit_large_imagenet_full_seed-0.json +6 -0
  110. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_100_seed-0.json +6 -0
  111. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_10_seed-0.json +6 -0
  112. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_1_seed-0.json +6 -0
  113. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_full_seed-0.json +6 -0
  114. brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b0_imagenet_full.json +6 -0
  115. brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b1_imagenet_full.json +6 -0
  116. brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b2_imagenet_full.json +6 -0
  117. brainscore_vision/models/scaling_models/region_layer_map/resnet101_ecoset_full.json +6 -0
  118. brainscore_vision/models/scaling_models/region_layer_map/resnet101_imagenet_full.json +6 -0
  119. brainscore_vision/models/scaling_models/region_layer_map/resnet152_ecoset_full.json +6 -0
  120. brainscore_vision/models/scaling_models/region_layer_map/resnet18_ecoset_full.json +6 -0
  121. brainscore_vision/models/scaling_models/region_layer_map/resnet18_imagenet_full.json +6 -0
  122. brainscore_vision/models/scaling_models/region_layer_map/resnet34_ecoset_full.json +6 -0
  123. brainscore_vision/models/scaling_models/region_layer_map/resnet34_imagenet_full.json +6 -0
  124. brainscore_vision/models/scaling_models/region_layer_map/resnet50_ecoset_full.json +6 -0
  125. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_100_seed-0.json +6 -0
  126. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_10_seed-0.json +6 -0
  127. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_1_seed-0.json +6 -0
  128. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_full.json +6 -0
  129. brainscore_vision/models/scaling_models/requirements.txt +4 -0
  130. brainscore_vision/models/scaling_models/test.py +0 -0
  131. brainscore_vision/models/vitb14_dinov2_imagenet1k/__init__.py +5 -0
  132. brainscore_vision/models/vitb14_dinov2_imagenet1k/model.py +852 -0
  133. brainscore_vision/models/vitb14_dinov2_imagenet1k/setup.py +25 -0
  134. brainscore_vision/models/vitb14_dinov2_imagenet1k/test.py +0 -0
  135. brainscore_vision/models/voneresnet_50_non_stochastic/region_layer_map/voneresnet-50-non_stochastic.json +1 -0
  136. brainscore_vision/submission/actions_helpers.py +2 -2
  137. brainscore_vision/submission/endpoints.py +3 -4
  138. {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/METADATA +2 -2
  139. {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/RECORD +143 -18
  140. {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/WHEEL +1 -1
  141. tests/test_model_helpers/temporal/activations/test_inferencer.py +2 -2
  142. {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/LICENSE +0 -0
  143. {brainscore_vision-2.1.dist-info → brainscore_vision-2.2.1.dist-info}/top_level.txt +0 -0
brainscore_vision/benchmarks/imagenet_c/benchmark.py
@@ -15,7 +15,7 @@ from brainscore_vision.benchmarks.imagenet.benchmark import NUMBER_OF_TRIALS
  from brainscore_vision.model_interface import BrainModel

  _logger = logging.getLogger(__name__)
- LOCAL_STIMULUS_DIRECTORY = '/braintree/data2/active/common/imagenet-c-brainscore-stimuli/'
+ LOCAL_STIMULUS_DIRECTORY = '/mnt/brainscore-ami/imagenet-c-brainscore-stimuli/'

  BIBTEX = """@ARTICLE{Hendrycks2019-di,
  title = "Benchmarking Neural Network Robustness to Common Corruptions
brainscore_vision/benchmarks/lonnqvist2024/__init__.py
@@ -0,0 +1,8 @@
+ from brainscore_vision import benchmark_registry
+ from . import benchmark
+
+ benchmark_registry['Lonnqvist2024_InlabInstructionsBehavioralAccuracyDistance'] = lambda: benchmark._Lonnqvist2024BehavioralAccuracyDistanceInlabInstructions()
+ benchmark_registry['Lonnqvist2024_InlabNoInstructionsBehavioralAccuracyDistance'] = lambda: benchmark._Lonnqvist2024BehavioralAccuracyDistanceInlabNoInstructions()
+ benchmark_registry['Lonnqvist2024_OnlineNoInstructionsBehavioralAccuracyDistance'] = lambda: benchmark._Lonnqvist2024BehavioralAccuracyDistanceOnlineNoInstructions()
+
+ benchmark_registry['Lonnqvist2024_EngineeringAccuracy'] = lambda: benchmark._Lonnqvist2024EngineeringAccuracy()
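
Note: the new registry entries can be exercised the same way the bundled tests do. A minimal sketch, mirroring brainscore_vision/benchmarks/lonnqvist2024/test.py (assumes private S3 access to the Lonnqvist2024 data; 'alexnet' is just an example model):

    from brainscore_vision import load_benchmark, load_model

    benchmark = load_benchmark('Lonnqvist2024_InlabInstructionsBehavioralAccuracyDistance')
    model = load_model('alexnet')
    score = benchmark(model)  # ceiling-adjusted Score, capped at 1.0
    print(float(score.values), score.attrs['raw'], score.attrs['ceiling'])
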
brainscore_vision/benchmarks/lonnqvist2024/benchmark.py
@@ -0,0 +1,125 @@
+ from pathlib import Path
+
+ import numpy as np
+
+ from brainio.assemblies import BehavioralAssembly
+ from brainscore_vision import load_metric, load_stimulus_set, load_dataset
+ from brainscore_vision.benchmark_helpers.screen import place_on_screen
+ from brainscore_vision.benchmarks import BenchmarkBase
+ from brainscore_vision.metrics import Score
+ from brainscore_vision.model_interface import BrainModel
+
+ BIBTEX = ""  # to appear in a future article
+
+
+ class _Lonnqvist2024Base(BenchmarkBase):
+     def __init__(self, identifier, dataset, ceiling_func, metric):
+         self._metric = metric
+         self._stimulus_set = load_stimulus_set('Lonnqvist2024_test')
+         self._fitting_stimuli = load_stimulus_set('Lonnqvist2024_train')
+         self._visual_degrees = 17.70753
+         self.assembly = load_dataset(f'Lonnqvist2024_{dataset}')
+
+         super(_Lonnqvist2024Base, self).__init__(
+             identifier=identifier, version=1,
+             ceiling_func=ceiling_func,
+             parent='Lonnqvist2024',
+             bibtex=BIBTEX)
+
+     def __call__(self, candidate: BrainModel, return_raw_responses: bool = False):
+         fitting_stimulus_set = place_on_screen(
+             self._fitting_stimuli,
+             target_visual_degrees=candidate.visual_degrees(),
+             source_visual_degrees=self._visual_degrees
+         )
+         candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, number_of_trials=1)
+         stimulus_set = place_on_screen(
+             self._stimulus_set,
+             target_visual_degrees=candidate.visual_degrees(),
+             source_visual_degrees=self._visual_degrees
+         )
+         model_response = candidate.look_at(stimulus_set, number_of_trials=1)
+         model_response = convert_proba_to_choices(model_response)
+         raw_score = self._metric(model_response, self.assembly)
+         # Adjust score to ceiling
+         ceiling = self.ceiling
+         score = raw_score / ceiling
+         # ensure score <= 1.0
+         if score.values > 1:
+             score = Score(np.array(1.))
+         score.attrs['raw'] = raw_score
+         score.attrs['ceiling'] = ceiling
+         if return_raw_responses:
+             return score, model_response
+         return score
+
+
+ class _Lonnqvist2024BehavioralAccuracyDistanceInlabInstructions(_Lonnqvist2024Base):
+     def __init__(self):
+         metric = load_metric('accuracy_distance')
+         ceiling_func = lambda: metric.ceiling(self.assembly)
+         super(_Lonnqvist2024BehavioralAccuracyDistanceInlabInstructions, self).__init__(
+             identifier='Lonnqvist2024-inlab-instructions_behavioral_accuracy_distance', dataset='inlab-instructions',
+             ceiling_func=ceiling_func,
+             metric=metric)
+
+
+ class _Lonnqvist2024BehavioralAccuracyDistanceInlabNoInstructions(_Lonnqvist2024Base):
+     def __init__(self):
+         metric = load_metric('accuracy_distance')
+         ceiling_func = lambda: metric.ceiling(self.assembly)
+         super(_Lonnqvist2024BehavioralAccuracyDistanceInlabNoInstructions, self).__init__(
+             identifier='Lonnqvist2024-inlab-no-instructions_behavioral_accuracy_distance', dataset='inlab-no-instructions',
+             ceiling_func=ceiling_func,
+             metric=metric)
+
+
+ class _Lonnqvist2024BehavioralAccuracyDistanceOnlineNoInstructions(_Lonnqvist2024Base):
+     def __init__(self):
+         metric = load_metric('accuracy_distance')
+         ceiling_func = lambda: metric.ceiling(self.assembly)
+         super(_Lonnqvist2024BehavioralAccuracyDistanceOnlineNoInstructions, self).__init__(
+             identifier='Lonnqvist2024-online-no-instructions_behavioral_accuracy_distance', dataset='online-no-instructions',
+             ceiling_func=ceiling_func,
+             metric=metric)
+
+
+ class _Lonnqvist2024EngineeringAccuracy(_Lonnqvist2024Base):
+     def __init__(self):
+         metric = load_metric('accuracy')
+         ceiling_func = lambda: Score(1)
+         super(_Lonnqvist2024EngineeringAccuracy, self).__init__(
+             identifier='Lonnqvist2024-engineering_accuracy', dataset='inlab-instructions',
+             ceiling_func=ceiling_func,
+             metric=metric)
+
+     def __call__(self, candidate: BrainModel, return_raw_responses: bool = False):
+         fitting_stimulus_set = place_on_screen(
+             self._fitting_stimuli,
+             target_visual_degrees=candidate.visual_degrees(),
+             source_visual_degrees=self._visual_degrees
+         )
+         candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=fitting_stimulus_set, number_of_trials=1)
+         stimulus_set = place_on_screen(
+             self._stimulus_set,
+             target_visual_degrees=candidate.visual_degrees(),
+             source_visual_degrees=self._visual_degrees
+         )
+         model_response = candidate.look_at(stimulus_set, number_of_trials=1)
+         model_response = convert_proba_to_choices(model_response)
+         raw_score = self._metric(model_response, stimulus_set['truth'])
+         # Adjust score to ceiling
+         ceiling = self.ceiling
+         score = raw_score / ceiling
+         score.attrs['raw'] = raw_score
+         score.attrs['ceiling'] = ceiling
+         if return_raw_responses:
+             return score, model_response
+         return score
+
+
+ def convert_proba_to_choices(source: BehavioralAssembly) -> BehavioralAssembly:
+     """Converts the probability values returned by models doing probability tasks to behavioral choices."""
+     decisions = np.argmax(source.values, axis=1)
+     choices = [source['choice'].values[decision] for decision in decisions]
+     return BehavioralAssembly(choices, coords={'presentation': source['presentation']})
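
Note: convert_proba_to_choices above boils down to an argmax over the choice axis followed by a label lookup. A numpy-only illustration with made-up probabilities (the real function operates on a BehavioralAssembly with a 'choice' coordinate):

    import numpy as np

    proba = np.array([[0.9, 0.1],   # hypothetical model output: 3 presentations x 2 choices
                      [0.2, 0.8],
                      [0.6, 0.4]])
    choice_labels = np.array(['same', 'diff'])  # order of the assembly's 'choice' coordinate

    decisions = np.argmax(proba, axis=1)  # most probable choice index per presentation
    choices = choice_labels[decisions]    # -> array(['same', 'diff', 'same'])
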
brainscore_vision/benchmarks/lonnqvist2024/test.py
@@ -0,0 +1,61 @@
+ import pytest
+ from pytest import approx
+
+ from brainscore_vision import benchmark_registry, load_benchmark, load_model
+
+
+ @pytest.mark.parametrize('benchmark', [
+     'Lonnqvist2024_InlabInstructionsBehavioralAccuracyDistance',
+     'Lonnqvist2024_InlabNoInstructionsBehavioralAccuracyDistance',
+     'Lonnqvist2024_OnlineNoInstructionsBehavioralAccuracyDistance',
+     'Lonnqvist2024_EngineeringAccuracy',
+ ])
+ def test_benchmark_registry(benchmark):
+     assert benchmark in benchmark_registry
+
+
+ class TestBehavioral:
+     @pytest.mark.private_access
+     @pytest.mark.parametrize('dataset, expected_ceiling', [
+         ('InlabInstructionsBehavioralAccuracyDistance', approx(0.95646366, abs=0.001)),
+         ('InlabNoInstructionsBehavioralAccuracyDistance', approx(0.84258475, abs=0.001)),
+         ('OnlineNoInstructionsBehavioralAccuracyDistance', approx(0.79752907, abs=0.001)),
+     ])
+     def test_dataset_ceiling(self, dataset, expected_ceiling):
+         benchmark = f"Lonnqvist2024_{dataset}"
+         benchmark = load_benchmark(benchmark)
+         ceiling = benchmark.ceiling
+         assert ceiling == expected_ceiling
+
+     @pytest.mark.private_access
+     @pytest.mark.parametrize('dataset, expected_raw_score', [
+         ('InlabInstructionsBehavioralAccuracyDistance', approx(0.58568247, abs=0.001)),
+         ('InlabNoInstructionsBehavioralAccuracyDistance', approx(0.62883828, abs=0.001)),
+         ('OnlineNoInstructionsBehavioralAccuracyDistance', approx(0.78192183, abs=0.001)),
+     ])
+     def test_model(self, dataset, expected_raw_score):
+         benchmark = load_benchmark(f"Lonnqvist2024_{dataset}")
+         model = load_model('alexnet')
+         score = benchmark(model)
+         raw_score = score.raw
+         # division by ceiling <= 1 should result in higher score
+         assert score >= raw_score
+         assert raw_score == expected_raw_score
+
+
+ class TestEngineering:
+     @pytest.mark.parametrize('dataset, expected_accuracy', [
+         ('EngineeringAccuracy', approx(0.45, abs=0.001)),
+     ])
+     def test_accuracy(self, dataset, expected_accuracy):
+         benchmark = load_benchmark(f"Lonnqvist2024_{dataset}")
+         model = load_model('alexnet')
+         score = benchmark(model)
+         raw_score = score.raw
+         # division by ceiling <= 1 should result in higher score
+         assert score >= raw_score
+         assert raw_score == expected_accuracy
brainscore_vision/benchmarks/malania2007/benchmark.py
@@ -186,6 +186,9 @@ class _Malania2007VernierAcuity(BenchmarkBase):
          # Adjust score to ceiling
          ceiling = self.ceiling
          score = raw_score / ceiling
+         # ensure score <= 1.0
+         if score.values > 1:
+             score = Score(np.array(1.))
          score.attrs['error'] = raw_score.error

          score.attrs['raw'] = raw_score
brainscore_vision/benchmarks/maniquet2024/benchmark.py
@@ -63,7 +63,7 @@ class _Maniquet2024ConfusionSimilarity(BenchmarkBase):

          # Call the parent class constructor to complete initialization
          super(_Maniquet2024ConfusionSimilarity, self).__init__(
-             identifier="Maniquet2024-confusion_similarity'",
+             identifier="Maniquet2024-confusion_similarity",
              version=1,
              ceiling_func=lambda: Score(0.53526),  # use pre-computed from `self._metric._ceiling(self._human_assembly)`
              parent="Maniquet2024",
brainscore_vision/data/lonnqvist2024/__init__.py
@@ -0,0 +1,47 @@
+ from brainio.assemblies import BehavioralAssembly
+
+ from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set
+ from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3
+
+
+ data_registry['Lonnqvist2024_inlab-instructions'] = lambda: load_assembly_from_s3(
+     identifier='Lonnqvist2024_inlab-instructions',
+     version_id='nTcvZkZedprOMYKkZ8kONUxy4M.__F_C',
+     sha1='64ec603ebc852d193e7437980eaabe8fc482d88b',
+     bucket="brainio-brainscore",
+     cls=BehavioralAssembly,
+     stimulus_set_loader=lambda: load_stimulus_set('Lonnqvist2024_test'))
+
+ data_registry['Lonnqvist2024_inlab-no-instructions'] = lambda: load_assembly_from_s3(
+     identifier='Lonnqvist2024_inlab-no-instructions',
+     version_id='XtRi6xl6cJJ_71mAzfqqm3fSKujI8O4C',
+     sha1='ff248ca2058d4e36eee44dbc6f8ea6a79c70b715',
+     bucket="brainio-brainscore",
+     cls=BehavioralAssembly,
+     stimulus_set_loader=lambda: load_stimulus_set('Lonnqvist2024_test'))
+
+ data_registry['Lonnqvist2024_online-no-instructions'] = lambda: load_assembly_from_s3(
+     identifier='Lonnqvist2024_online-no-instructions',
+     version_id='VRMgNb4mYSf_S6S81LGK2LFVVrfnCI26',
+     sha1='04240330eaf371d160ab418fd5560a72ed42cecb',
+     bucket="brainio-brainscore",
+     cls=BehavioralAssembly,
+     stimulus_set_loader=lambda: load_stimulus_set('Lonnqvist2024_test'))
+
+ stimulus_set_registry['Lonnqvist2024_train'] = lambda: load_stimulus_set_from_s3(
+     identifier='Lonnqvist2024_train',
+     bucket="brainio-brainscore",
+     csv_sha1='2d6a95a8239aa647ddc6aedd449eabcebdf882cf',
+     zip_sha1='8adbf4de94524892042d3e43629a4be2beeedcaf',
+     csv_version_id='92o14bqbYd5Xut2zoTADmT4S_FEPmuW6',
+     zip_version_id='oZM7Fe446bq15A1fpoatEsvWHZrstdXJ',
+     filename_prefix='stimulus_')
+
+ stimulus_set_registry['Lonnqvist2024_test'] = lambda: load_stimulus_set_from_s3(
+     identifier='Lonnqvist2024_test',
+     bucket="brainio-brainscore",
+     csv_sha1='8bc98dfc9f334e5c21b68f6787b3255da0d8644a',
+     zip_sha1='cf94b5341d956d250e7f7798044cf71bbd100721',
+     csv_version_id='VCzpiY0ZMySfDTPIT8zauLZuV0QJmgwZ',
+     zip_version_id='ZjEM4Es91H1aGyGo73VcVoL1id6FdWiO',
+     filename_prefix='stimulus_')
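
Note: a quick sanity check of the data registered above, matching the expectations encoded in brainscore_vision/data/lonnqvist2024/test.py further down (requires private S3 access):

    from brainscore_vision import load_dataset, load_stimulus_set

    assembly = load_dataset('Lonnqvist2024_inlab-instructions')
    assert len(assembly['presentation']) == 3800   # 10 subjects x 380 test images
    stimuli = load_stimulus_set('Lonnqvist2024_test')
    assert len(set(stimuli['stimulus_id'])) == 380
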
brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py
@@ -0,0 +1,53 @@
+ from pathlib import Path
+ import numpy as np
+ import xarray as xr
+
+ from brainio.assemblies import BehavioralAssembly
+ from brainio.packaging import package_data_assembly
+ import pandas as pd
+
+
+ DATASETS = ['inlab-instructions', 'inlab-no-instructions', 'online-no-instructions']
+
+
+ def collect_lonnqvist_data_assembly(root_directory, dataset):
+     """
+     Experiment Information:
+     """
+     data = pd.read_csv(Path(rf'{root_directory}/{dataset}.csv'))
+
+     assembly = BehavioralAssembly(data['subject_answer'],
+                                   coords={
+                                       'subject': ('presentation', data['subject_id']),
+                                       'visual_degrees': ('presentation', data['visual_degrees']),
+                                       'image_duration': ('presentation', data['image_duration']),
+                                       'is_correct': ('presentation', data['is_correct']),
+                                       'subject_answer': ('presentation', data['subject_answer']),
+                                       'curve_length': ('presentation', data['curve_length']),
+                                       'n_cross': ('presentation', data['n_cross']),
+                                       'image_path': ('presentation', data['image_path']),
+                                       'stimulus_id': ('presentation', data['stimulus_id']),
+                                       'truth': ('presentation', data['truth']),
+                                       'image_label': ('presentation', data['truth'])
+                                   },
+                                   dims=['presentation']
+                                   )
+
+     # give the assembly an identifier name
+     assembly.name = f'Lonnqvist2024_{dataset}'
+
+     return assembly
+
+
+ if __name__ == '__main__':
+     root_directory = Path(r'./local')
+     for dataset in DATASETS:
+         assembly = collect_lonnqvist_data_assembly(root_directory, dataset)
+         # upload to S3
+         prints = package_data_assembly(catalog_identifier=None,
+                                        proto_data_assembly=assembly,
+                                        assembly_identifier=assembly.name,
+                                        stimulus_set_identifier=assembly.name,
+                                        assembly_class_name="BehavioralAssembly",
+                                        bucket_name="brainio-brainscore")
+         print(prints)
brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py
@@ -0,0 +1,61 @@
+ from pathlib import Path
+ import csv
+
+ from brainio.stimuli import StimulusSet
+ from brainio.packaging import package_stimulus_set
+
+ '''
+ dataset Meta Info
+
+ - curve_length: values from 20-200 in steps of 10
+ - n_cross: number of times the lines intercept, range from 1-7
+ - condition: same or diff
+ '''
+
+
+ def collect_lonnqvist_stimulus_set(dataset, stimuli_directory, metadata_filepath):
+     stimuli = []
+     stimulus_paths = {}
+
+     with open(metadata_filepath, 'r') as metadata:
+         reader = csv.DictReader(metadata)
+         for row in reader:
+             stimulus_meta = {
+                 'curve_length': int(row['curve_length']),
+                 'n_cross': int(row['n_cross']),
+                 'image_path': str(row['path']),
+                 'stimulus_id': str(row['idx']),
+                 'truth': str(row['correct_response_key']),
+                 'image_label': str(row['correct_response_key'])
+             }
+
+             stimuli.append(stimulus_meta)
+             stimulus_paths[str(row['idx'])] = Path(f'{row["path"]}')
+
+     stimuli = StimulusSet(stimuli)
+     stimuli.stimulus_paths = stimulus_paths
+
+     stimuli.name = f'Lonnqvist2024_{dataset}'
+     stimuli.identifier = f'Lonnqvist2024_{dataset}'
+     return stimuli
+
+
+ if __name__ == '__main__':
+     datasets = ['train', 'test']
+     stimulus_directories = {'train': Path(r'stimuli/images_examples'),
+                             'test': Path(r'stimuli/images')}
+     metadata_filepaths = {'train': Path('stimuli/metadata_examples.csv'),
+                           'test': Path('stimuli/metadata.csv')}
+     for dataset in datasets:
+         stimulus_set = collect_lonnqvist_stimulus_set(dataset,
+                                                       stimulus_directories[dataset],
+                                                       metadata_filepaths[dataset])
+         if dataset == 'train':
+             assert len(stimulus_set) == 185
+         else:
+             assert len(stimulus_set) == 380
+         prints = package_stimulus_set(catalog_name=None,
+                                       proto_stimulus_set=stimulus_set,
+                                       stimulus_set_identifier=stimulus_set.name,
+                                       bucket_name="brainio-brainscore")
+         print(prints)
brainscore_vision/data/lonnqvist2024/test.py
@@ -0,0 +1,127 @@
+ import numpy as np
+ import pytest
+
+ from brainscore_vision import load_stimulus_set, load_dataset
+
+
+ @pytest.mark.private_access
+ @pytest.mark.parametrize('assembly_identifier', [
+     'Lonnqvist2024_inlab-instructions',
+     'Lonnqvist2024_inlab-no-instructions',
+     'Lonnqvist2024_online-no-instructions'
+ ])
+ def test_existence(assembly_identifier):
+     assert load_dataset(assembly_identifier) is not None
+
+
+ @pytest.mark.private_access
+ class TestAssemblies:
+     @pytest.mark.parametrize('assembly', [
+         'Lonnqvist2024_inlab-instructions',
+         'Lonnqvist2024_inlab-no-instructions',
+         'Lonnqvist2024_online-no-instructions'
+     ])
+     @pytest.mark.parametrize('identifier', [
+         'Lonnqvist2024_test'
+     ])
+     @pytest.mark.parametrize('field', [
+         'stimulus_id',
+         'truth'
+     ])
+     def test_stimulus_set_assembly_alignment(self, assembly, identifier, field):
+         assembly = load_dataset(assembly)
+         assert assembly.stimulus_set is not None
+         assert assembly.stimulus_set.identifier == identifier
+         assert set(assembly.stimulus_set[field]) == set(assembly[field].values)
+
+     # test the number of subjects
+     @pytest.mark.parametrize('identifier, num_subjects', [
+         ('Lonnqvist2024_inlab-instructions', 10),
+         ('Lonnqvist2024_inlab-no-instructions', 10),
+         ('Lonnqvist2024_online-no-instructions', 92),
+     ])
+     def test_num_subjects(self, identifier, num_subjects):
+         assembly = load_dataset(identifier)
+         assert len(np.unique(assembly['subject'].values)) == num_subjects
+
+     # test number of unique images
+     @pytest.mark.parametrize('identifier, num_unique_images', [
+         ('Lonnqvist2024_inlab-instructions', 380),
+         ('Lonnqvist2024_inlab-no-instructions', 380),
+         ('Lonnqvist2024_online-no-instructions', 380),
+     ])
+     def test_num_unique_images(self, identifier, num_unique_images):
+         assembly = load_dataset(identifier)
+         assert len(np.unique(assembly['stimulus_id'].values)) == num_unique_images
+
+     # test assembly length for all datasets
+     @pytest.mark.parametrize('identifier, length', [
+         ('Lonnqvist2024_inlab-instructions', 3800),
+         ('Lonnqvist2024_inlab-no-instructions', 3800),
+         ('Lonnqvist2024_online-no-instructions', 34960),
+     ])
+     def test_length(self, identifier, length):
+         assembly = load_dataset(identifier)
+         assert len(assembly['presentation']) == length
+
+     # test assembly coords present in all datasets:
+     @pytest.mark.parametrize('identifier', [
+         'Lonnqvist2024_inlab-instructions',
+         'Lonnqvist2024_inlab-no-instructions',
+         'Lonnqvist2024_online-no-instructions'
+     ])
+     @pytest.mark.parametrize('field', [
+         'subject',
+         'visual_degrees',
+         'image_duration',
+         'is_correct',
+         'subject_answer',
+         'curve_length',
+         'n_cross',
+         'image_path',
+         'stimulus_id',
+         'truth',
+         'image_label'
+     ])
+     def test_fields_present(self, identifier, field):
+         assembly = load_dataset(identifier)
+         assert hasattr(assembly, field)
+
+
+ @pytest.mark.private_access
+ @pytest.mark.slow
+ class TestStimulusSets:
+     # test stimulus_set data:
+     @pytest.mark.parametrize('identifier', [
+         'Lonnqvist2024_train',
+         'Lonnqvist2024_test',
+     ])
+     def test_stimulus_set_exists(self, identifier):
+         stimulus_set = load_stimulus_set(identifier)
+         assert stimulus_set is not None
+         assert stimulus_set.identifier == identifier
+
+     @pytest.mark.parametrize('identifier, num_images', [
+         ('Lonnqvist2024_train', 185),
+         ('Lonnqvist2024_test', 380),
+     ])
+     def test_number_of_images(self, identifier, num_images):
+         stimulus_set = load_stimulus_set(identifier)
+         assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images
+
+     # test stimulus_set coords present in both sets:
+     @pytest.mark.parametrize('identifier', [
+         'Lonnqvist2024_train',
+         'Lonnqvist2024_test',
+     ])
+     @pytest.mark.parametrize('field', [
+         'curve_length',
+         'n_cross',
+         'image_path',
+         'stimulus_id',
+         'truth',
+         'image_label'
+     ])
+     def test_fields_present(self, identifier, field):
+         stimulus_set = load_stimulus_set(identifier)
+         assert hasattr(stimulus_set, field)
brainscore_vision/model_helpers/brain_transformation/__init__.py
@@ -1,3 +1,8 @@
+ import json
+ from pathlib import Path
+ import logging
+ from brainscore_vision.utils import fullname
+ from brainscore_core.plugin_management import import_plugin
  from brainscore_vision import load_benchmark
  from brainscore_vision.model_helpers.brain_transformation.temporal import TemporalAligned
  from brainscore_vision.model_interface import BrainModel
@@ -22,6 +27,7 @@ class ModelCommitment(BrainModel):
      def __init__(self, identifier,
                   activations_model, layers, behavioral_readout_layer=None, region_layer_map=None,
                   visual_degrees=8):
+         self._logger = logging.getLogger(fullname(self))
          self.layers = layers
          self.activations_model = activations_model
          # We set the visual degrees of the ActivationsExtractorHelper here to avoid changing its signature.
@@ -30,12 +36,18 @@ class ModelCommitment(BrainModel):
          self.activations_model._extractor.set_visual_degrees(visual_degrees)  # for microsaccades
          self._visual_degrees = visual_degrees
          # region-layer mapping
+
+         # Attempt to load region_layer_map from JSON, if available
+         region_layer_map = self.load_region_layer_map_json(identifier) if region_layer_map is None else region_layer_map
+
+         # If region_layer_map is unavailable
          if region_layer_map is None:
              layer_selection = LayerSelection(model_identifier=identifier,
                                               activations_model=activations_model, layers=layers,
                                               visual_degrees=visual_degrees)
              region_layer_map = RegionLayerMap(layer_selection=layer_selection,
                                                region_benchmarks=STANDARD_REGION_BENCHMARKS)
+
          # neural
          layer_model = LayerMappedModel(identifier=identifier, activations_model=activations_model,
                                         region_layer_map=region_layer_map)
@@ -52,6 +64,27 @@ class ModelCommitment(BrainModel):
          })
          self.do_behavior = False

+     def load_region_layer_map_json(self, identifier):
+         '''
+         Attempts to load the region_layer_map from a JSON file in the model's directory.
+         If the file exists, load the JSON. Otherwise, return None and proceed with legacy layer mapping.
+         '''
+         try:
+             importer = import_plugin.ImportPlugin(library_root='brainscore_vision', plugin_type='models', identifier=identifier)
+             model_dir = importer.locate_plugin()
+             project_root = Path(__file__).resolve().parent.parent
+             region_layer_map_path = project_root / 'vision' / 'models' / model_dir / 'region_layer_map' / f'{identifier}.json'
+             if region_layer_map_path.exists():
+                 with region_layer_map_path.open('r') as region_layer_map_file:
+                     self._logger.info(f"Successfully loaded region_layer_map for {identifier}")
+                     return json.load(region_layer_map_file)
+             else:
+                 self._logger.info(f"No region_layer_map file found for {identifier}, proceeding with default layer mapping")
+                 return None
+         except Exception as e:
+             self._logger.error(f"Error importing model to search for region_layer_map: {e}")
+             return None
+
      def visual_degrees(self) -> int:
          return self._visual_degrees

brainscore_vision/models/alexnet/region_layer_map/alexnet.json
@@ -0,0 +1 @@
+ {"IT": "features.12", "V4": "features.7", "V2": "features.7", "V1": "features.2"}
brainscore_vision/models/alexnet_7be5be79/setup.py
@@ -3,10 +3,10 @@

  from setuptools import setup, find_packages

- requirements = ["torchvision",
-                 "torch",
-                 "fire"
-                 ]
+ requirements = [ "torchvision",
+                  "torch",
+                  "fire"
+                  ]

  setup(
      packages=find_packages(exclude=['tests']),
brainscore_vision/models/alexnet_random/__init__.py
@@ -0,0 +1,7 @@
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['alexnet_random'] = lambda: ModelCommitment(identifier='alexnet_random',
+                                                            activations_model=get_model('alexnet_random'),
+                                                            layers=get_layers('alexnet_random'))
brainscore_vision/models/alexnet_random/model.py
@@ -0,0 +1,46 @@
+ from brainscore_vision.model_helpers.check_submission import check_models
+ import functools
+ import os
+ import torchvision.models
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
+ from pathlib import Path
+ from brainscore_vision.model_helpers import download_weights
+ import torch
+
+ # This is an example implementation for submitting a randomly initialized AlexNet as a pytorch model
+
+ # Attention: It is important that the wrapper identifier is unique per model!
+ # The results will otherwise be the same due to Brain-Score's internal result caching mechanism.
+ # Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model.
+ # If the model requires a GPU, contact the Brain-Score team directly.
+
+
+ def get_model_list():
+     return ['alexnet_random']
+
+
+ def get_model(name):
+     assert name == 'alexnet_random'
+     model = torchvision.models.alexnet(pretrained=False)
+     preprocessing = functools.partial(load_preprocess_images, image_size=224)
+     wrapper = PytorchWrapper(identifier='alexnet_random',
+                              model=model,
+                              preprocessing=preprocessing)
+     wrapper.image_size = 224
+     return wrapper
+
+
+ def get_layers(name):
+     assert name == 'alexnet_random'
+     return ['features.0', 'features.3', 'features.6', 'features.8', 'features.10', 'classifier.1',
+             'classifier.4', 'classifier.6']
+
+
+ def get_bibtex(model_identifier):
+     return """xx"""
+
+
+ if __name__ == '__main__':
+     check_models.check_base_models(__name__)