brainscore-vision 2.1__py3-none-any.whl → 2.1.0__py3-none-any.whl

Files changed (143)
  1. brainscore_vision/benchmarks/coggan2024_behavior/__init__.py +2 -1
  2. brainscore_vision/benchmarks/coggan2024_behavior/test.py +2 -2
  3. brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py +4 -4
  4. brainscore_vision/benchmarks/coggan2024_fMRI/test.py +2 -2
  5. brainscore_vision/benchmarks/imagenet/imagenet2012.csv +50000 -50000
  6. brainscore_vision/benchmarks/imagenet_c/benchmark.py +1 -1
  7. brainscore_vision/benchmarks/lonnqvist2024/__init__.py +8 -0
  8. brainscore_vision/benchmarks/lonnqvist2024/benchmark.py +125 -0
  9. brainscore_vision/benchmarks/lonnqvist2024/test.py +61 -0
  10. brainscore_vision/benchmarks/malania2007/benchmark.py +3 -0
  11. brainscore_vision/benchmarks/maniquet2024/benchmark.py +1 -1
  12. brainscore_vision/data/lonnqvist2024/__init__.py +47 -0
  13. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_data_assembly.py +53 -0
  14. brainscore_vision/data/lonnqvist2024/data_packaging/lonnqvist_stimulus_set.py +61 -0
  15. brainscore_vision/data/lonnqvist2024/test.py +127 -0
  16. brainscore_vision/model_helpers/brain_transformation/__init__.py +33 -0
  17. brainscore_vision/models/alexnet/region_layer_map/alexnet.json +1 -0
  18. brainscore_vision/models/alexnet_7be5be79/setup.py +4 -4
  19. brainscore_vision/models/alexnet_random/__init__.py +7 -0
  20. brainscore_vision/models/alexnet_random/model.py +46 -0
  21. brainscore_vision/models/alexnet_random/setup.py +26 -0
  22. brainscore_vision/models/alexnet_random/test.py +1 -0
  23. brainscore_vision/models/cvt_cvt_13_224_in1k_4/__init__.py +9 -0
  24. brainscore_vision/models/cvt_cvt_13_224_in1k_4/model.py +142 -0
  25. brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4.json +6 -0
  26. brainscore_vision/models/cvt_cvt_13_224_in1k_4/region_layer_map/cvt_cvt-13-224-in1k_4_LucyV4.json +6 -0
  27. brainscore_vision/models/cvt_cvt_13_224_in1k_4/requirements.txt +4 -0
  28. brainscore_vision/models/cvt_cvt_13_224_in1k_4/test.py +8 -0
  29. brainscore_vision/models/cvt_cvt_13_384_in1k_4/__init__.py +9 -0
  30. brainscore_vision/models/cvt_cvt_13_384_in1k_4/model.py +142 -0
  31. brainscore_vision/models/cvt_cvt_13_384_in1k_4/region_layer_map/cvt_cvt-13-384-in1k_4_LucyV4.json +6 -0
  32. brainscore_vision/models/cvt_cvt_13_384_in1k_4/requirements.txt +4 -0
  33. brainscore_vision/models/cvt_cvt_13_384_in1k_4/test.py +8 -0
  34. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/__init__.py +9 -0
  35. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/model.py +142 -0
  36. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-13-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
  37. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
  38. brainscore_vision/models/cvt_cvt_13_384_in22k_finetuned_in1k_4/test.py +8 -0
  39. brainscore_vision/models/cvt_cvt_21_224_in1k_4/__init__.py +9 -0
  40. brainscore_vision/models/cvt_cvt_21_224_in1k_4/model.py +142 -0
  41. brainscore_vision/models/cvt_cvt_21_224_in1k_4/region_layer_map/cvt_cvt-21-224-in1k_4_LucyV4.json +6 -0
  42. brainscore_vision/models/cvt_cvt_21_224_in1k_4/requirements.txt +4 -0
  43. brainscore_vision/models/cvt_cvt_21_224_in1k_4/test.py +8 -0
  44. brainscore_vision/models/cvt_cvt_21_384_in1k_4/__init__.py +9 -0
  45. brainscore_vision/models/cvt_cvt_21_384_in1k_4/model.py +142 -0
  46. brainscore_vision/models/cvt_cvt_21_384_in1k_4/region_layer_map/cvt_cvt-21-384-in1k_4_LucyV4.json +6 -0
  47. brainscore_vision/models/cvt_cvt_21_384_in1k_4/requirements.txt +4 -0
  48. brainscore_vision/models/cvt_cvt_21_384_in1k_4/test.py +8 -0
  49. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/__init__.py +9 -0
  50. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/model.py +142 -0
  51. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/region_layer_map/cvt_cvt-21-384-in22k_finetuned-in1k_4_LucyV4.json +6 -0
  52. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/requirements.txt +4 -0
  53. brainscore_vision/models/cvt_cvt_21_384_in22k_finetuned_in1k_4/test.py +8 -0
  54. brainscore_vision/models/fixres_resnext101_32x48d_wsl/__init__.py +7 -0
  55. brainscore_vision/models/fixres_resnext101_32x48d_wsl/model.py +57 -0
  56. brainscore_vision/models/fixres_resnext101_32x48d_wsl/requirements.txt +5 -0
  57. brainscore_vision/models/fixres_resnext101_32x48d_wsl/test.py +7 -0
  58. brainscore_vision/models/inception_v4_pytorch/__init__.py +7 -0
  59. brainscore_vision/models/inception_v4_pytorch/model.py +64 -0
  60. brainscore_vision/models/inception_v4_pytorch/requirements.txt +3 -0
  61. brainscore_vision/models/inception_v4_pytorch/test.py +8 -0
  62. brainscore_vision/models/mvimgnet_ms_05/__init__.py +9 -0
  63. brainscore_vision/models/mvimgnet_ms_05/model.py +64 -0
  64. brainscore_vision/models/mvimgnet_ms_05/setup.py +25 -0
  65. brainscore_vision/models/mvimgnet_ms_05/test.py +1 -0
  66. brainscore_vision/models/mvimgnet_rf/__init__.py +9 -0
  67. brainscore_vision/models/mvimgnet_rf/model.py +64 -0
  68. brainscore_vision/models/mvimgnet_rf/setup.py +25 -0
  69. brainscore_vision/models/mvimgnet_rf/test.py +1 -0
  70. brainscore_vision/models/mvimgnet_ss_00/__init__.py +9 -0
  71. brainscore_vision/models/mvimgnet_ss_00/model.py +64 -0
  72. brainscore_vision/models/mvimgnet_ss_00/setup.py +25 -0
  73. brainscore_vision/models/mvimgnet_ss_00/test.py +1 -0
  74. brainscore_vision/models/mvimgnet_ss_02/__init__.py +9 -0
  75. brainscore_vision/models/mvimgnet_ss_02/model.py +64 -0
  76. brainscore_vision/models/mvimgnet_ss_02/setup.py +25 -0
  77. brainscore_vision/models/mvimgnet_ss_02/test.py +1 -0
  78. brainscore_vision/models/mvimgnet_ss_03/__init__.py +9 -0
  79. brainscore_vision/models/mvimgnet_ss_03/model.py +64 -0
  80. brainscore_vision/models/mvimgnet_ss_03/setup.py +25 -0
  81. brainscore_vision/models/mvimgnet_ss_03/test.py +1 -0
  82. brainscore_vision/models/mvimgnet_ss_04/__init__.py +9 -0
  83. brainscore_vision/models/mvimgnet_ss_04/model.py +64 -0
  84. brainscore_vision/models/mvimgnet_ss_04/setup.py +25 -0
  85. brainscore_vision/models/mvimgnet_ss_04/test.py +1 -0
  86. brainscore_vision/models/mvimgnet_ss_05/__init__.py +9 -0
  87. brainscore_vision/models/mvimgnet_ss_05/model.py +64 -0
  88. brainscore_vision/models/mvimgnet_ss_05/setup.py +25 -0
  89. brainscore_vision/models/mvimgnet_ss_05/test.py +1 -0
  90. brainscore_vision/models/resnet50_tutorial/region_layer_map/resnet50_tutorial.json +1 -0
  91. brainscore_vision/models/sam_test_resnet/__init__.py +5 -0
  92. brainscore_vision/models/sam_test_resnet/model.py +26 -0
  93. brainscore_vision/models/sam_test_resnet/requirements.txt +2 -0
  94. brainscore_vision/models/sam_test_resnet/test.py +8 -0
  95. brainscore_vision/models/sam_test_resnet_4/__init__.py +5 -0
  96. brainscore_vision/models/sam_test_resnet_4/model.py +26 -0
  97. brainscore_vision/models/sam_test_resnet_4/requirements.txt +2 -0
  98. brainscore_vision/models/sam_test_resnet_4/test.py +8 -0
  99. brainscore_vision/models/scaling_models/__init__.py +265 -0
  100. brainscore_vision/models/scaling_models/model.py +148 -0
  101. brainscore_vision/models/scaling_models/model_configs.json +869 -0
  102. brainscore_vision/models/scaling_models/region_layer_map/convnext_base_imagenet_full_seed-0.json +6 -0
  103. brainscore_vision/models/scaling_models/region_layer_map/convnext_large_imagenet_full_seed-0.json +6 -0
  104. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_100_seed-0.json +6 -0
  105. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_10_seed-0.json +6 -0
  106. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_1_seed-0.json +6 -0
  107. brainscore_vision/models/scaling_models/region_layer_map/convnext_small_imagenet_full_seed-0.json +6 -0
  108. brainscore_vision/models/scaling_models/region_layer_map/deit_base_imagenet_full_seed-0.json +6 -0
  109. brainscore_vision/models/scaling_models/region_layer_map/deit_large_imagenet_full_seed-0.json +6 -0
  110. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_100_seed-0.json +6 -0
  111. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_10_seed-0.json +6 -0
  112. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_1_seed-0.json +6 -0
  113. brainscore_vision/models/scaling_models/region_layer_map/deit_small_imagenet_full_seed-0.json +6 -0
  114. brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b0_imagenet_full.json +6 -0
  115. brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b1_imagenet_full.json +6 -0
  116. brainscore_vision/models/scaling_models/region_layer_map/efficientnet_b2_imagenet_full.json +6 -0
  117. brainscore_vision/models/scaling_models/region_layer_map/resnet101_ecoset_full.json +6 -0
  118. brainscore_vision/models/scaling_models/region_layer_map/resnet101_imagenet_full.json +6 -0
  119. brainscore_vision/models/scaling_models/region_layer_map/resnet152_ecoset_full.json +6 -0
  120. brainscore_vision/models/scaling_models/region_layer_map/resnet18_ecoset_full.json +6 -0
  121. brainscore_vision/models/scaling_models/region_layer_map/resnet18_imagenet_full.json +6 -0
  122. brainscore_vision/models/scaling_models/region_layer_map/resnet34_ecoset_full.json +6 -0
  123. brainscore_vision/models/scaling_models/region_layer_map/resnet34_imagenet_full.json +6 -0
  124. brainscore_vision/models/scaling_models/region_layer_map/resnet50_ecoset_full.json +6 -0
  125. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_100_seed-0.json +6 -0
  126. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_10_seed-0.json +6 -0
  127. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_1_seed-0.json +6 -0
  128. brainscore_vision/models/scaling_models/region_layer_map/resnet50_imagenet_full.json +6 -0
  129. brainscore_vision/models/scaling_models/requirements.txt +4 -0
  130. brainscore_vision/models/scaling_models/test.py +0 -0
  131. brainscore_vision/models/vitb14_dinov2_imagenet1k/__init__.py +5 -0
  132. brainscore_vision/models/vitb14_dinov2_imagenet1k/model.py +852 -0
  133. brainscore_vision/models/vitb14_dinov2_imagenet1k/setup.py +25 -0
  134. brainscore_vision/models/vitb14_dinov2_imagenet1k/test.py +0 -0
  135. brainscore_vision/models/voneresnet_50_non_stochastic/region_layer_map/voneresnet-50-non_stochastic.json +1 -0
  136. brainscore_vision/submission/actions_helpers.py +2 -2
  137. brainscore_vision/submission/endpoints.py +3 -4
  138. {brainscore_vision-2.1.dist-info → brainscore_vision-2.1.0.dist-info}/METADATA +2 -2
  139. {brainscore_vision-2.1.dist-info → brainscore_vision-2.1.0.dist-info}/RECORD +143 -18
  140. {brainscore_vision-2.1.dist-info → brainscore_vision-2.1.0.dist-info}/WHEEL +1 -1
  141. tests/test_model_helpers/temporal/activations/test_inferencer.py +2 -2
  142. {brainscore_vision-2.1.dist-info → brainscore_vision-2.1.0.dist-info}/LICENSE +0 -0
  143. {brainscore_vision-2.1.dist-info → brainscore_vision-2.1.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.4.0",
+ "V2": "features.5.7.block.0",
+ "V4": "features.4.0",
+ "IT": "features.5.11.block.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.4.1",
+ "V2": "features.5.7.block.0",
+ "V4": "features.4.1",
+ "IT": "features.5.11.block.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.4.0",
+ "V2": "features.5.9.block.0",
+ "V4": "features.5.2.block.0",
+ "IT": "features.5.17.block.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.5.9.block.0",
+ "V2": "features.5.17.block.0",
+ "V4": "features.5.17.block.0",
+ "IT": "features.5.17.block.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.5.2.block.0",
+ "V2": "features.5.9.block.0",
+ "V4": "features.5.17.block.0",
+ "IT": "features.5.17.block.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.5.2.block.0",
+ "V2": "features.5.9.block.0",
+ "V4": "features.4.0",
+ "IT": "features.5.9.block.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "blocks.3.mlp.fc1",
+ "V2": "blocks.3.mlp.act",
+ "V4": "blocks.3.mlp.fc1",
+ "IT": "blocks.9.norm2"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "blocks.4.norm1",
+ "V2": "blocks.9.norm1",
+ "V4": "blocks.9.norm1",
+ "IT": "blocks.20.norm2"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "blocks.9.norm2",
+ "V2": "blocks.9.norm2",
+ "V4": "blocks.9.norm2",
+ "IT": "blocks.9.norm2"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "blocks.2.norm1",
+ "V2": "blocks.2.norm1",
+ "V4": "blocks.2.norm1",
+ "IT": "blocks.2.norm1"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "blocks.5.norm1",
+ "V2": "blocks.2.norm1",
+ "V4": "blocks.6.norm2",
+ "IT": "blocks.6.norm2"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "blocks.2.norm1",
+ "V2": "blocks.6.norm2",
+ "V4": "blocks.6.norm2",
+ "IT": "blocks.9.norm2"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.4.0.block.1.0",
+ "V2": "features.4.1.block.3.1",
+ "V4": "features.4.0.block.1.0",
+ "IT": "features.6.0.block.3.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.4.0.block.0.1",
+ "V2": "features.4.0.block.1.0",
+ "V4": "features.4.0.block.1.0",
+ "IT": "features.6.0.block.3.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "features.4.0.block.3.0",
+ "V2": "features.4.0.block.3.0",
+ "V4": "features.4.0.block.3.0",
+ "IT": "features.6.0.block.3.0"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer3.4.relu",
+ "V2": "layer3.4.relu",
+ "V4": "layer3.0.bn3",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer3.0.bn3",
+ "V2": "layer3.0.bn3",
+ "V4": "layer3.0.bn3",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer3.3.bn3",
+ "V2": "layer3.3.bn3",
+ "V4": "layer3.0.bn3",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.conv1",
+ "V2": "layer3.0.conv1",
+ "V4": "layer3.0.conv1",
+ "IT": "layer4.0.bn1"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.bn1",
+ "V2": "layer2.0.bn2",
+ "V4": "layer3.0.conv2",
+ "IT": "layer4.0.bn1"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.bn1",
+ "V2": "layer3.1.conv1",
+ "V4": "layer3.0.conv1",
+ "IT": "layer4.0.conv1"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.bn1",
+ "V2": "layer3.1.conv1",
+ "V4": "layer3.1.conv1",
+ "IT": "layer4.0.bn1"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.bn1",
+ "V2": "layer4.0.conv2",
+ "V4": "layer3.0.conv1",
+ "IT": "layer4.0.conv2"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer3.0.conv1",
+ "V2": "layer3.5.bn3",
+ "V4": "layer3.0.conv1",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.conv1",
+ "V2": "layer3.5.bn3",
+ "V4": "layer1.0.conv1",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer3.0.conv1",
+ "V2": "layer3.5.bn3",
+ "V4": "layer3.0.conv1",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,6 @@
+ {
+ "V1": "layer1.0.conv1",
+ "V2": "layer3.5.bn3",
+ "V4": "layer3.0.conv1",
+ "IT": "layer4.0.relu"
+ }
@@ -0,0 +1,4 @@
+ torch
+ torchvision
+ albumentations
+ timm
File without changes
@@ -0,0 +1,5 @@
+ from brainscore_vision import model_registry
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
+ from .model import get_model, get_layers
+
+ model_registry['vitb14_dinov2_imagenet1k'] = lambda: ModelCommitment(identifier='vitb14_dinov2_imagenet1k', activations_model=get_model('vitb14_dinov2_imagenet1k'), layers=get_layers('vitb14_dinov2_imagenet1k'))
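
The `get_model`/`get_layers` functions registered above come from the plugin's `model.py` (852 lines, not reproduced in this excerpt). For orientation only, a minimal sketch of what such a plugin's `model.py` can look like, assuming the standard `PytorchWrapper` activation helpers and a torch.hub DINOv2 ViT-B/14 backbone; the backbone source and layer names below are illustrative assumptions, not the packaged code:

# hypothetical sketch, not the packaged model.py
import functools
import torch
from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, load_preprocess_images

def get_model(name):
    assert name == 'vitb14_dinov2_imagenet1k'
    # assumption: DINOv2 ViT-B/14 backbone via torch.hub; 224x224 inputs (divisible by patch size 14)
    model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14')
    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    return PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing)

def get_layers(name):
    assert name == 'vitb14_dinov2_imagenet1k'
    # illustrative layer names; the packaged plugin defines its own list
    return [f'blocks.{i}' for i in range(12)]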