brainscore-vision 2.2.2__py3-none-any.whl → 2.2.4__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (213) hide show
  1. brainscore_vision/models/alexnet_less_variation_1/__init__.py +6 -0
  2. brainscore_vision/models/alexnet_less_variation_1/model.py +200 -0
  3. brainscore_vision/models/alexnet_less_variation_1/region_layer_map/alexnet_less_variation_iteration=1.json +6 -0
  4. brainscore_vision/models/alexnet_less_variation_1/setup.py +29 -0
  5. brainscore_vision/models/alexnet_less_variation_1/test.py +3 -0
  6. brainscore_vision/models/alexnet_less_variation_2/__init__.py +6 -0
  7. brainscore_vision/models/alexnet_less_variation_2/model.py +200 -0
  8. brainscore_vision/models/alexnet_less_variation_2/region_layer_map/alexnet_less_variation_iteration=2.json +6 -0
  9. brainscore_vision/models/alexnet_less_variation_2/setup.py +29 -0
  10. brainscore_vision/models/alexnet_less_variation_2/test.py +3 -0
  11. brainscore_vision/models/alexnet_less_variation_4/__init__.py +6 -0
  12. brainscore_vision/models/alexnet_less_variation_4/model.py +200 -0
  13. brainscore_vision/models/alexnet_less_variation_4/region_layer_map/alexnet_less_variation_iteration=4.json +6 -0
  14. brainscore_vision/models/alexnet_less_variation_4/setup.py +29 -0
  15. brainscore_vision/models/alexnet_less_variation_4/test.py +3 -0
  16. brainscore_vision/models/alexnet_no_specular_2/__init__.py +6 -0
  17. brainscore_vision/models/alexnet_no_specular_2/model.py +200 -0
  18. brainscore_vision/models/alexnet_no_specular_2/region_layer_map/alexnet_no_specular_iteration=2.json +6 -0
  19. brainscore_vision/models/alexnet_no_specular_2/setup.py +29 -0
  20. brainscore_vision/models/alexnet_no_specular_2/test.py +3 -0
  21. brainscore_vision/models/alexnet_no_specular_4/__init__.py +6 -0
  22. brainscore_vision/models/alexnet_no_specular_4/model.py +200 -0
  23. brainscore_vision/models/alexnet_no_specular_4/region_layer_map/alexnet_no_specular_iteration=4.json +6 -0
  24. brainscore_vision/models/alexnet_no_specular_4/setup.py +29 -0
  25. brainscore_vision/models/alexnet_no_specular_4/test.py +3 -0
  26. brainscore_vision/models/alexnet_no_variation_4/__init__.py +6 -0
  27. brainscore_vision/models/alexnet_no_variation_4/model.py +200 -0
  28. brainscore_vision/models/alexnet_no_variation_4/region_layer_map/alexnet_no_variation_iteration=4.json +6 -0
  29. brainscore_vision/models/alexnet_no_variation_4/setup.py +29 -0
  30. brainscore_vision/models/alexnet_no_variation_4/test.py +3 -0
  31. brainscore_vision/models/alexnet_original_3/__init__.py +6 -0
  32. brainscore_vision/models/alexnet_original_3/model.py +200 -0
  33. brainscore_vision/models/alexnet_original_3/region_layer_map/alexnet_original_iteration=3.json +6 -0
  34. brainscore_vision/models/alexnet_original_3/setup.py +29 -0
  35. brainscore_vision/models/alexnet_original_3/test.py +3 -0
  36. brainscore_vision/models/alexnet_wo_shading_4/__init__.py +6 -0
  37. brainscore_vision/models/alexnet_wo_shading_4/model.py +200 -0
  38. brainscore_vision/models/alexnet_wo_shading_4/region_layer_map/alexnet_wo_shading_iteration=4.json +6 -0
  39. brainscore_vision/models/alexnet_wo_shading_4/setup.py +29 -0
  40. brainscore_vision/models/alexnet_wo_shading_4/test.py +3 -0
  41. brainscore_vision/models/alexnet_wo_shadows_5/__init__.py +6 -0
  42. brainscore_vision/models/alexnet_wo_shadows_5/model.py +200 -0
  43. brainscore_vision/models/alexnet_wo_shadows_5/region_layer_map/alexnet_wo_shadows_iteration=5.json +6 -0
  44. brainscore_vision/models/alexnet_wo_shadows_5/setup.py +29 -0
  45. brainscore_vision/models/alexnet_wo_shadows_5/test.py +3 -0
  46. brainscore_vision/models/alexnet_z_axis_1/__init__.py +6 -0
  47. brainscore_vision/models/alexnet_z_axis_1/model.py +200 -0
  48. brainscore_vision/models/alexnet_z_axis_1/region_layer_map/alexnet_z_axis_iteration=1.json +6 -0
  49. brainscore_vision/models/alexnet_z_axis_1/setup.py +29 -0
  50. brainscore_vision/models/alexnet_z_axis_1/test.py +3 -0
  51. brainscore_vision/models/alexnet_z_axis_2/__init__.py +6 -0
  52. brainscore_vision/models/alexnet_z_axis_2/model.py +200 -0
  53. brainscore_vision/models/alexnet_z_axis_2/region_layer_map/alexnet_z_axis_iteration=2.json +6 -0
  54. brainscore_vision/models/alexnet_z_axis_2/setup.py +29 -0
  55. brainscore_vision/models/alexnet_z_axis_2/test.py +3 -0
  56. brainscore_vision/models/alexnet_z_axis_3/__init__.py +6 -0
  57. brainscore_vision/models/alexnet_z_axis_3/model.py +200 -0
  58. brainscore_vision/models/alexnet_z_axis_3/region_layer_map/alexnet_z_axis_iteration=3.json +6 -0
  59. brainscore_vision/models/alexnet_z_axis_3/setup.py +29 -0
  60. brainscore_vision/models/alexnet_z_axis_3/test.py +3 -0
  61. brainscore_vision/models/alexnet_z_axis_4/__init__.py +6 -0
  62. brainscore_vision/models/alexnet_z_axis_4/model.py +200 -0
  63. brainscore_vision/models/alexnet_z_axis_4/region_layer_map/alexnet_z_axis_iteration=4.json +6 -0
  64. brainscore_vision/models/alexnet_z_axis_4/setup.py +29 -0
  65. brainscore_vision/models/alexnet_z_axis_4/test.py +3 -0
  66. brainscore_vision/models/artResNet18_1/__init__.py +5 -0
  67. brainscore_vision/models/artResNet18_1/model.py +66 -0
  68. brainscore_vision/models/artResNet18_1/requirements.txt +4 -0
  69. brainscore_vision/models/artResNet18_1/test.py +12 -0
  70. brainscore_vision/models/barlow_twins_custom/__init__.py +5 -0
  71. brainscore_vision/models/barlow_twins_custom/model.py +58 -0
  72. brainscore_vision/models/barlow_twins_custom/requirements.txt +4 -0
  73. brainscore_vision/models/barlow_twins_custom/test.py +12 -0
  74. brainscore_vision/models/blt-vs/__init__.py +15 -0
  75. brainscore_vision/models/blt-vs/model.py +962 -0
  76. brainscore_vision/models/blt-vs/pretrained.py +219 -0
  77. brainscore_vision/models/blt-vs/region_layer_map/blt_vs.json +6 -0
  78. brainscore_vision/models/blt-vs/setup.py +22 -0
  79. brainscore_vision/models/blt-vs/test.py +0 -0
  80. brainscore_vision/models/cifar_resnet18_1/__init__.py +5 -0
  81. brainscore_vision/models/cifar_resnet18_1/model.py +68 -0
  82. brainscore_vision/models/cifar_resnet18_1/requirements.txt +4 -0
  83. brainscore_vision/models/cifar_resnet18_1/test.py +10 -0
  84. brainscore_vision/models/resnet18_random/__init__.py +5 -0
  85. brainscore_vision/models/resnet18_random/archive_name.zip +0 -0
  86. brainscore_vision/models/resnet18_random/model.py +42 -0
  87. brainscore_vision/models/resnet18_random/requirements.txt +2 -0
  88. brainscore_vision/models/resnet18_random/test.py +12 -0
  89. brainscore_vision/models/resnet50_less_variation_1/__init__.py +6 -0
  90. brainscore_vision/models/resnet50_less_variation_1/model.py +200 -0
  91. brainscore_vision/models/resnet50_less_variation_1/region_layer_map/resnet50_less_variation_iteration=1.json +6 -0
  92. brainscore_vision/models/resnet50_less_variation_1/setup.py +29 -0
  93. brainscore_vision/models/resnet50_less_variation_1/test.py +3 -0
  94. brainscore_vision/models/resnet50_less_variation_2/__init__.py +6 -0
  95. brainscore_vision/models/resnet50_less_variation_2/model.py +200 -0
  96. brainscore_vision/models/resnet50_less_variation_2/region_layer_map/resnet50_less_variation_iteration=2.json +6 -0
  97. brainscore_vision/models/resnet50_less_variation_2/setup.py +29 -0
  98. brainscore_vision/models/resnet50_less_variation_2/test.py +3 -0
  99. brainscore_vision/models/resnet50_less_variation_3/__init__.py +6 -0
  100. brainscore_vision/models/resnet50_less_variation_3/model.py +200 -0
  101. brainscore_vision/models/resnet50_less_variation_3/region_layer_map/resnet50_less_variation_iteration=3.json +6 -0
  102. brainscore_vision/models/resnet50_less_variation_3/setup.py +29 -0
  103. brainscore_vision/models/resnet50_less_variation_3/test.py +3 -0
  104. brainscore_vision/models/resnet50_less_variation_4/__init__.py +6 -0
  105. brainscore_vision/models/resnet50_less_variation_4/model.py +200 -0
  106. brainscore_vision/models/resnet50_less_variation_4/region_layer_map/resnet50_less_variation_iteration=4.json +6 -0
  107. brainscore_vision/models/resnet50_less_variation_4/setup.py +29 -0
  108. brainscore_vision/models/resnet50_less_variation_4/test.py +3 -0
  109. brainscore_vision/models/resnet50_less_variation_5/__init__.py +6 -0
  110. brainscore_vision/models/resnet50_less_variation_5/model.py +200 -0
  111. brainscore_vision/models/resnet50_less_variation_5/region_layer_map/resnet50_less_variation_iteration=5.json +6 -0
  112. brainscore_vision/models/resnet50_less_variation_5/setup.py +29 -0
  113. brainscore_vision/models/resnet50_less_variation_5/test.py +3 -0
  114. brainscore_vision/models/resnet50_no_variation_1/__init__.py +6 -0
  115. brainscore_vision/models/resnet50_no_variation_1/model.py +200 -0
  116. brainscore_vision/models/resnet50_no_variation_1/region_layer_map/resnet50_no_variation_iteration=1.json +6 -0
  117. brainscore_vision/models/resnet50_no_variation_1/setup.py +29 -0
  118. brainscore_vision/models/resnet50_no_variation_1/test.py +3 -0
  119. brainscore_vision/models/resnet50_no_variation_2/__init__.py +6 -0
  120. brainscore_vision/models/resnet50_no_variation_2/model.py +200 -0
  121. brainscore_vision/models/resnet50_no_variation_2/region_layer_map/resnet50_no_variation_iteration=2.json +6 -0
  122. brainscore_vision/models/resnet50_no_variation_2/setup.py +29 -0
  123. brainscore_vision/models/resnet50_no_variation_2/test.py +3 -0
  124. brainscore_vision/models/resnet50_no_variation_5/__init__.py +6 -0
  125. brainscore_vision/models/resnet50_no_variation_5/model.py +200 -0
  126. brainscore_vision/models/resnet50_no_variation_5/region_layer_map/resnet50_no_variation_iteration=5.json +6 -0
  127. brainscore_vision/models/resnet50_no_variation_5/setup.py +29 -0
  128. brainscore_vision/models/resnet50_no_variation_5/test.py +3 -0
  129. brainscore_vision/models/resnet50_original_1/__init__.py +6 -0
  130. brainscore_vision/models/resnet50_original_1/model.py +200 -0
  131. brainscore_vision/models/resnet50_original_1/region_layer_map/resnet50_original_iteration=1.json +6 -0
  132. brainscore_vision/models/resnet50_original_1/setup.py +29 -0
  133. brainscore_vision/models/resnet50_original_1/test.py +3 -0
  134. brainscore_vision/models/resnet50_original_2/__init__.py +6 -0
  135. brainscore_vision/models/resnet50_original_2/model.py +200 -0
  136. brainscore_vision/models/resnet50_original_2/region_layer_map/resnet50_original_iteration=2.json +6 -0
  137. brainscore_vision/models/resnet50_original_2/setup.py +29 -0
  138. brainscore_vision/models/resnet50_original_2/test.py +3 -0
  139. brainscore_vision/models/resnet50_original_5/__init__.py +6 -0
  140. brainscore_vision/models/resnet50_original_5/model.py +200 -0
  141. brainscore_vision/models/resnet50_original_5/region_layer_map/resnet50_original_iteration=5.json +6 -0
  142. brainscore_vision/models/resnet50_original_5/setup.py +29 -0
  143. brainscore_vision/models/resnet50_original_5/test.py +3 -0
  144. brainscore_vision/models/resnet50_textures_1/__init__.py +6 -0
  145. brainscore_vision/models/resnet50_textures_1/model.py +200 -0
  146. brainscore_vision/models/resnet50_textures_1/region_layer_map/resnet50_textures_iteration=1.json +6 -0
  147. brainscore_vision/models/resnet50_textures_1/setup.py +29 -0
  148. brainscore_vision/models/resnet50_textures_1/test.py +3 -0
  149. brainscore_vision/models/resnet50_textures_2/__init__.py +6 -0
  150. brainscore_vision/models/resnet50_textures_2/model.py +200 -0
  151. brainscore_vision/models/resnet50_textures_2/region_layer_map/resnet50_textures_iteration=2.json +6 -0
  152. brainscore_vision/models/resnet50_textures_2/setup.py +29 -0
  153. brainscore_vision/models/resnet50_textures_2/test.py +3 -0
  154. brainscore_vision/models/resnet50_textures_3/__init__.py +6 -0
  155. brainscore_vision/models/resnet50_textures_3/model.py +200 -0
  156. brainscore_vision/models/resnet50_textures_3/region_layer_map/resnet50_textures_iteration=3.json +6 -0
  157. brainscore_vision/models/resnet50_textures_3/setup.py +29 -0
  158. brainscore_vision/models/resnet50_textures_3/test.py +3 -0
  159. brainscore_vision/models/resnet50_textures_4/__init__.py +6 -0
  160. brainscore_vision/models/resnet50_textures_4/model.py +200 -0
  161. brainscore_vision/models/resnet50_textures_4/region_layer_map/resnet50_textures_iteration=4.json +6 -0
  162. brainscore_vision/models/resnet50_textures_4/setup.py +29 -0
  163. brainscore_vision/models/resnet50_textures_4/test.py +3 -0
  164. brainscore_vision/models/resnet50_textures_5/__init__.py +6 -0
  165. brainscore_vision/models/resnet50_textures_5/model.py +200 -0
  166. brainscore_vision/models/resnet50_textures_5/region_layer_map/resnet50_textures_iteration=5.json +6 -0
  167. brainscore_vision/models/resnet50_textures_5/setup.py +29 -0
  168. brainscore_vision/models/resnet50_textures_5/test.py +3 -0
  169. brainscore_vision/models/resnet50_wo_shading_1/__init__.py +6 -0
  170. brainscore_vision/models/resnet50_wo_shading_1/model.py +200 -0
  171. brainscore_vision/models/resnet50_wo_shading_1/region_layer_map/resnet50_wo_shading_iteration=1.json +6 -0
  172. brainscore_vision/models/resnet50_wo_shading_1/setup.py +29 -0
  173. brainscore_vision/models/resnet50_wo_shading_1/test.py +3 -0
  174. brainscore_vision/models/resnet50_wo_shading_3/__init__.py +6 -0
  175. brainscore_vision/models/resnet50_wo_shading_3/model.py +200 -0
  176. brainscore_vision/models/resnet50_wo_shading_3/region_layer_map/resnet50_wo_shading_iteration=3.json +6 -0
  177. brainscore_vision/models/resnet50_wo_shading_3/setup.py +29 -0
  178. brainscore_vision/models/resnet50_wo_shading_3/test.py +3 -0
  179. brainscore_vision/models/resnet50_wo_shading_4/__init__.py +6 -0
  180. brainscore_vision/models/resnet50_wo_shading_4/model.py +200 -0
  181. brainscore_vision/models/resnet50_wo_shading_4/region_layer_map/resnet50_wo_shading_iteration=4.json +6 -0
  182. brainscore_vision/models/resnet50_wo_shading_4/setup.py +29 -0
  183. brainscore_vision/models/resnet50_wo_shading_4/test.py +3 -0
  184. brainscore_vision/models/resnet50_wo_shadows_4/__init__.py +6 -0
  185. brainscore_vision/models/resnet50_wo_shadows_4/model.py +200 -0
  186. brainscore_vision/models/resnet50_wo_shadows_4/region_layer_map/resnet50_wo_shadows_iteration=4.json +6 -0
  187. brainscore_vision/models/resnet50_wo_shadows_4/setup.py +29 -0
  188. brainscore_vision/models/resnet50_wo_shadows_4/test.py +3 -0
  189. brainscore_vision/models/resnet50_z_axis_1/__init__.py +6 -0
  190. brainscore_vision/models/resnet50_z_axis_1/model.py +200 -0
  191. brainscore_vision/models/resnet50_z_axis_1/region_layer_map/resnet50_z_axis_iteration=1.json +6 -0
  192. brainscore_vision/models/resnet50_z_axis_1/setup.py +29 -0
  193. brainscore_vision/models/resnet50_z_axis_1/test.py +3 -0
  194. brainscore_vision/models/resnet50_z_axis_2/__init__.py +6 -0
  195. brainscore_vision/models/resnet50_z_axis_2/model.py +200 -0
  196. brainscore_vision/models/resnet50_z_axis_2/region_layer_map/resnet50_z_axis_iteration=2.json +6 -0
  197. brainscore_vision/models/resnet50_z_axis_2/setup.py +29 -0
  198. brainscore_vision/models/resnet50_z_axis_2/test.py +3 -0
  199. brainscore_vision/models/resnet50_z_axis_3/__init__.py +6 -0
  200. brainscore_vision/models/resnet50_z_axis_3/model.py +200 -0
  201. brainscore_vision/models/resnet50_z_axis_3/region_layer_map/resnet50_z_axis_iteration=3.json +6 -0
  202. brainscore_vision/models/resnet50_z_axis_3/setup.py +29 -0
  203. brainscore_vision/models/resnet50_z_axis_3/test.py +3 -0
  204. brainscore_vision/models/resnet50_z_axis_5/__init__.py +6 -0
  205. brainscore_vision/models/resnet50_z_axis_5/model.py +200 -0
  206. brainscore_vision/models/resnet50_z_axis_5/region_layer_map/resnet50_z_axis_iteration=5.json +6 -0
  207. brainscore_vision/models/resnet50_z_axis_5/setup.py +29 -0
  208. brainscore_vision/models/resnet50_z_axis_5/test.py +3 -0
  209. {brainscore_vision-2.2.2.dist-info → brainscore_vision-2.2.4.dist-info}/METADATA +1 -1
  210. {brainscore_vision-2.2.2.dist-info → brainscore_vision-2.2.4.dist-info}/RECORD +213 -5
  211. {brainscore_vision-2.2.2.dist-info → brainscore_vision-2.2.4.dist-info}/LICENSE +0 -0
  212. {brainscore_vision-2.2.2.dist-info → brainscore_vision-2.2.4.dist-info}/WHEEL +0 -0
  213. {brainscore_vision-2.2.2.dist-info → brainscore_vision-2.2.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,219 @@
1
+
2
+ import hashlib
3
+ import requests
4
+ from pathlib import Path
5
+ import zipfile
6
+ from collections import OrderedDict
7
+ from warnings import warn
8
+ import json
9
+ import torch
10
+ import zipfile
11
# Registry of downloadable weights, keyed by model class:
#   _MODELS[cls][key] = {"url": ..., "hash": ...}
_MODELS = {}
# Alias names mapping to registered model keys: _ALIASES[cls][alias] = key
_ALIASES = {}
13
+
14
+
15
def get_file(fname, origin, file_hash=None, cache_dir=".cache", cache_subdir="datasets", extract=True):
    """
    Download a file from a URL, cache it locally, and optionally verify its hash and extract it.

    Args:
        fname (str): The name of the file to save locally.
        origin (str): The URL to download the file from.
        file_hash (str, optional): Expected SHA-256 hex digest used to verify integrity. Defaults to None.
        cache_dir (str): The root cache directory. Defaults to ".cache".
        cache_subdir (str or Path): The subdirectory within the cache directory. Defaults to "datasets".
        extract (bool): Whether to extract the file if it's a ZIP archive. Defaults to True.

    Returns:
        str: Path to the cached file, or to the extraction directory when a ZIP was (or had been) extracted.

    Raises:
        ValueError: If `file_hash` is given and the cached/downloaded file does not match it.
    """
    cache_path = Path(cache_dir) / cache_subdir
    cache_path.mkdir(parents=True, exist_ok=True)

    file_path = cache_path / fname

    if not file_path.exists():
        print(f"Downloading {origin} to {file_path}...")
        # Bug fix: stream to a temporary ".part" file and rename on success so
        # an interrupted download is never mistaken for a complete cached file.
        tmp_path = file_path.with_name(file_path.name + ".part")
        response = requests.get(origin, stream=True)
        response.raise_for_status()
        try:
            with open(tmp_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            tmp_path.replace(file_path)  # atomic promotion to final name
        finally:
            if tmp_path.exists():
                tmp_path.unlink()
        print(f"Download complete: {file_path}")

    # Hash is verified on every call (cached files included), not only on download.
    if file_hash:
        print("Verifying file hash...")
        sha256 = hashlib.sha256()
        with open(file_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                sha256.update(chunk)
        downloaded_file_hash = sha256.hexdigest()
        if downloaded_file_hash != file_hash:
            raise ValueError(f"File hash does not match! Expected {file_hash}, got {downloaded_file_hash}")
        print("File hash verified.")

    if extract and zipfile.is_zipfile(file_path):
        extract_path = cache_path
        # Skip extraction when the expected ".json"/".pth" outputs already exist.
        json_file = extract_path / f"{fname.replace('.zip', '')}.json"
        weight_file = extract_path / f"{fname.replace('.zip', '')}.pth"
        if not json_file.exists() and not weight_file.exists():
            print(f"Extracting {file_path} to {extract_path}")
            with zipfile.ZipFile(file_path, "r") as zip_ref:
                zip_ref.extractall(extract_path)
            print(f"Extraction complete: {extract_path}")

        return str(extract_path)

    return str(file_path)
68
+
69
+
70
def clear_models_and_aliases(*cls):
    """Remove registrations for the given classes, or all registrations when none are given."""
    if not cls:
        _MODELS.clear()
        _ALIASES.clear()
        return
    for klass in cls:
        # pop with default mirrors the original "delete only if present" behavior
        _MODELS.pop(klass, None)
        _ALIASES.pop(klass, None)
80
+
81
def register_model(cls, key, url, hash):
    """Register downloadable weights (url + hash) for `cls` under `key`.

    `key` must be a valid file/folder name in the file system.
    Re-registering an existing key overwrites it after emitting a warning.
    """
    models = _MODELS.setdefault(cls, OrderedDict())
    if key in models:
        warn(
            "re-registering model '{}' (was already registered for '{}')".format(
                key, cls.__name__
            )
        )
    models[key] = dict(url=url, hash=hash)
90
+
91
+
92
def register_aliases(cls, key, *names):
    """Register alias names (arbitrary strings) for an already-registered model key.

    Args:
        cls: The model class the key was registered for.
        key (str): A key previously registered via `register_model`.
        *names (str): Alias names to associate with `key`.

    Raises:
        ValueError: If `key` has not been registered for `cls`.
    """
    if len(names) == 0:
        return
    models = _MODELS.get(cls, {})
    # Bug fix: the ValueError was previously constructed but never raised
    # (`key in models or ValueError(...)`), so unregistered keys slipped through.
    if key not in models:
        raise ValueError(f"model '{key}' is not registered for '{cls.__name__}'")

    aliases = _ALIASES.setdefault(cls, OrderedDict())
    for name in names:
        # Warn when an alias is being re-pointed at a different model key.
        if aliases.get(name, key) != key:
            warn(
                "alias '{}' was previously registered with model '{}' for '{}'".format(
                    name, aliases[name], cls.__name__
                )
            )
        aliases[name] = key
107
+
108
+
109
def _print_registered_models(cls, models, model_aliases):
    """Pretty-print a table of registered models and their aliases for `cls`."""
    count = len(models)
    print(
        "There {is_are} {n} registered {model_s} for '{clazz}'{c}".format(
            n=count,
            clazz=cls.__name__,
            is_are="is" if count == 1 else "are",
            model_s="model" if count == 1 else "models",
            c=":" if count > 0 else "",
        )
    )
    if count > 0:
        print()
        width = 2 + max(len(key) for key in models)
        print("Name{s}Alias(es)".format(s=" " * (width - 4 + 3)))
        print("────{s}─────────".format(s=" " * (width - 4 + 3)))
        for key in models:
            names = model_aliases[key]
            shown = " " + ("'%s'" % "', '".join(names) if names else "None")
            padded = ("{s:%d}" % width).format(s="'%s'" % key)
            print(f"{padded}{shown}")


def get_registered_models(cls, return_aliases=True, verbose=False):
    """Return registered model keys for `cls`, optionally with their aliases.

    Returns `(keys, {key: aliases_tuple})` when `return_aliases` is True,
    otherwise just the tuple of keys. With `verbose=True`, also prints a table.
    """
    models = _MODELS.get(cls, {})
    aliases = _ALIASES.get(cls, {})
    model_keys = tuple(models.keys())
    model_aliases = {
        key: tuple(name for name in aliases if aliases[name] == key) for key in models
    }
    if verbose:
        _print_registered_models(cls, models, model_aliases)
    return (model_keys, model_aliases) if return_aliases else model_keys
146
+
147
+
148
def get_model_details(cls, key_or_alias, verbose=True):
    """Resolve a model key or alias to its registration.

    Args:
        cls: The model class the registration was made for.
        key_or_alias (str): A registered model key, or an alias for one.
        verbose (bool): Print which model was found. Defaults to True.

    Returns:
        tuple: `(key, alias_or_None, {"url": ..., "hash": ...})`.

    Raises:
        ValueError: If `key_or_alias` matches neither a key nor an alias.
    """
    models = _MODELS.get(cls, {})

    if key_or_alias in models:
        key = key_or_alias
        alias = None
    else:
        aliases = _ALIASES.get(cls, {})
        alias = key_or_alias
        # Bug fix: the ValueError was previously constructed but never raised,
        # letting the lookup below fail with an opaque KeyError instead.
        if alias not in aliases:
            raise ValueError(f"'{alias}' is neither a key or alias for '{cls.__name__}'")
        key = aliases[alias]
    if verbose:
        print(
            "Found model '{model}'{alias_str} for '{clazz}'.".format(
                model=key,
                clazz=cls.__name__,
                alias_str=("" if alias is None else " with alias '%s'" % alias),
            )
        )
    return key, alias, models[key]
168
+
169
+
170
+
171
def get_model_folder(cls, key_or_alias):
    """Download the model archive if needed and return the folder that contains it."""
    key, _alias, registration = get_model_details(cls, key_or_alias)
    # Per-model cache location: models/<ClassName>/<key>
    target = Path("models") / cls.__name__ / key
    archive_location = get_file(
        fname=key + ".zip",
        origin=registration["url"],
        file_hash=registration["hash"],
        cache_subdir=target,
        extract=True,
    )
    path = Path(archive_location)

    assert path.exists() and path.parent.exists()
    return path.parent
186
+
187
+
188
+
189
def get_model_instance(cls, key_or_alias):
    """Instantiate `cls` from its downloaded config (.json) and weights (.pth).

    Args:
        cls: Model class; must accept `timesteps`, `hook_type`, `bio_unroll`
            and `num_classes` keyword arguments.
        key_or_alias (str): Registered model key or alias.

    Returns:
        An instance of `cls` with the downloaded state dict loaded.

    Raises:
        FileNotFoundError: If the expected .json or .pth file is missing.
    """
    path = get_model_folder(cls, key_or_alias)
    json_file = path / key_or_alias / f"{key_or_alias}.json"
    weight_file = path / key_or_alias / f"{key_or_alias}.pth"

    # Bug fix: Path objects are always truthy, so the previous
    # `if not json_file or not weight_file` check could never trigger;
    # check for actual existence instead.
    if not json_file.exists() or not weight_file.exists():
        raise FileNotFoundError("Required .json or .pth file not found in the model folder.")

    with open(json_file, "r") as f:
        config = json.load(f)

    timesteps = config.get("timesteps", 1)
    hook_type = config.get("hook_type", None)
    bio_unroll = config.get("bio_unroll", False)
    num_classes = config.get("num_classes", 1)

    model = cls(timesteps=timesteps, hook_type=hook_type, bio_unroll=bio_unroll, num_classes=num_classes)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    state_dict = torch.load(weight_file, map_location=device)

    # Drop profiler bookkeeping entries ("total_ops"/"total_params").
    # NOTE(review): the cast condition converts every tensor whose dtype is
    # *not* float64 via .float() — presumably float64 -> float32 was intended;
    # confirm before changing, behavior kept as-is here.
    filtered_state_dict = {
        k: v if not (isinstance(v, torch.Tensor) and v.dtype != torch.float64) else v.float()
        for k, v in state_dict.items()
        if not any(x in k for x in ["total_ops", "total_params"])
    }

    model.load_state_dict(filtered_state_dict)

    return model
@@ -0,0 +1,6 @@
1
+ {
2
+ "V1": "V1_5",
3
+ "V2": "V3_5",
4
+ "V4": "V1_5",
5
+ "IT": "V4_5"
6
+ }
@@ -0,0 +1,22 @@
1
# Packaging configuration for this Brain-Score model plugin.
# NOTE(review): no `name`/`version` are passed to setup(); presumably the
# Brain-Score submission tooling supplies them — confirm.
from setuptools import setup, find_packages

# Third-party packages required at runtime by this plugin.
requirements = [ "torchvision",
    "torch"
]

setup(
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='brain-score template',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    test_suite='tests',
)
File without changes
@@ -0,0 +1,5 @@
1
from brainscore_vision import model_registry
from .model import get_model

# Register the Barlow Twins (CIFAR-10-pretrained) ResNet-18 under its
# Brain-Score identifier; the lambda defers construction until lookup.
model_registry['cifar_resnet18_1'] = lambda: get_model('cifar_resnet18_1')
@@ -0,0 +1,68 @@
1
+ import torch
2
+ from pathlib import Path
3
+ from torchvision.models import resnet18
4
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
5
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
6
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
7
+ from collections import OrderedDict
8
+ from urllib.request import urlretrieve
9
+ import functools
10
+ import os
11
+
12
+
13
# Custom model loader
def get_model(name):
    """Build the Brain-Score ModelCommitment for 'cifar_resnet18_1'.

    Downloads a Barlow Twins CIFAR-10 checkpoint, loads its backbone weights
    into a ResNet-18 (with a CIFAR-style 3x3 stem conv), and wraps it for
    Brain-Score evaluation.

    Args:
        name (str): Must be 'cifar_resnet18_1'.

    Returns:
        ModelCommitment: The wrapped model with evaluation layers attached.
    """
    assert name == 'cifar_resnet18_1'
    # Bug fix: the URL literal previously began with a stray leading space,
    # which only worked because urllib strips leading whitespace.
    url = "https://www.dropbox.com/scl/fi/maqzcf3j87m7tp4sm1pab/barlow-cifar10-otu5cw89-ep-999.ckpt?rlkey=ou425fqbxxy6pe9lc4mz400mp&st=va93bqox&dl=1"
    # NOTE(review): urlretrieve re-downloads on every call (no caching) — confirm acceptable.
    fh, _ = urlretrieve(url)
    print(f"Downloaded weights file: {fh}, Size: {os.path.getsize(fh)} bytes")

    checkpoint = torch.load(fh, map_location="cpu")
    state_dict = checkpoint['state_dict']  # Adjust key if necessary
    # Keep only backbone weights; drop the Barlow Twins projector head.
    backbone_state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items() if not k.startswith("projector.")}

    # CIFAR-style ResNet-18: 3x3 stem convolution instead of the ImageNet 7x7.
    model = resnet18(pretrained=False)
    model.conv1 = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
    # strict=False: the checkpoint carries no classifier weights (fc stays random).
    model.load_state_dict(backbone_state_dict, strict=False)

    preprocessing = functools.partial(load_preprocess_images, image_size=224)

    activations_model = PytorchWrapper(identifier='cifar_resnet18_1', model=model, preprocessing=preprocessing)

    return ModelCommitment(
        identifier='cifar_resnet18_1',
        activations_model=activations_model,
        layers=['layer1', 'layer2', 'layer3', 'layer4', 'avgpool']
    )
48
+
49
def get_model_list():
    """Return the identifiers of the models this plugin provides."""
    identifier = 'cifar_resnet18_1'
    return [identifier]
51
+
52
# Specify layers to test
def get_layers(name):
    """Return the layer names scored for the given model identifier."""
    assert name == 'cifar_resnet18_1'
    residual_stages = ['layer1', 'layer2', 'layer3', 'layer4']
    return residual_stages + ['avgpool']
56
+
57
def get_bibtex(model_identifier):
    """Return a BibTeX citation string for this model.

    NOTE(review): the entry title says 'ArtResNet18' while this plugin is
    'cifar_resnet18_1' — confirm the citation is the intended one.
    """
    return """
    @misc{resnet18_test_consistency,
    title={ArtResNet18 Barlow Twins},
    author={Claudia Noche},
    year={2024},
    }
    """
65
+
66
# Allow running this module directly to validate the submission locally.
if __name__ == '__main__':
    from brainscore_vision.model_helpers.check_submission import check_models
    check_models.check_base_models(__name__)
@@ -0,0 +1,4 @@
1
+ torch
2
+ torchvision
3
+ requests
4
+ pathlib  # NOTE(review): pathlib is stdlib since Python 3.4 — this installs the obsolete PyPI backport; confirm and remove
@@ -0,0 +1,10 @@
1
+ import pytest
2
+ import brainscore_vision
3
+
4
@pytest.mark.travis_slow
def test_cifar_resnet18_1():
    """Smoke test: the registered model loads and reports its identifier."""
    model = brainscore_vision.load_model('cifar_resnet18_1')
    assert model.identifier == 'cifar_resnet18_1'
8
+
9
+
10
+
@@ -0,0 +1,5 @@
1
from brainscore_vision import model_registry
from .model import get_model

# Register the model with the identifier 'resnet18_random'; the lambda
# defers model construction until the registry entry is actually loaded.
model_registry['resnet18_random'] = lambda: get_model('resnet18_random')
@@ -0,0 +1,42 @@
1
+ import torch
2
+ from torchvision.models import resnet18
3
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
4
+ from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
5
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
6
+ import functools
7
+
8
# Preprocessing: resize/normalize inputs to 224x224 as required by ResNet.
preprocessing = functools.partial(load_preprocess_images, image_size=224)

# Define ResNet18 with random weights
def get_model(name):
    """Return a ModelCommitment wrapping an untrained (random-weight) ResNet-18."""
    assert name == 'resnet18_random'
    backbone = resnet18(pretrained=False)  # random initialization, nothing downloaded
    wrapper = PytorchWrapper(identifier='resnet18_random', model=backbone, preprocessing=preprocessing)
    return ModelCommitment(
        identifier='resnet18_random',
        activations_model=wrapper,
        # Layers evaluated by Brain-Score benchmarks.
        layers=['layer1', 'layer2', 'layer3', 'layer4', 'avgpool']
    )
24
+
25
# Specify layers to test
def get_layers(name):
    """Return the layer names scored for 'resnet18_random'."""
    assert name == 'resnet18_random'
    stages = ['layer1', 'layer2', 'layer3', 'layer4']
    return stages + ['avgpool']
29
+
30
# Optional: Provide a BibTeX reference for the model
def get_bibtex(model_identifier):
    """Return a BibTeX citation string for this (random-weights) model entry."""
    return """
    @misc{resnet18_test_consistency,
    title={ResNet18 with Random Weights},
    author={Clear Glue},
    year={2024},
    }
    """
39
+
40
# Allow running this module directly to validate the submission locally.
if __name__ == '__main__':
    from brainscore_vision.model_helpers.check_submission import check_models
    check_models.check_base_models(__name__)
@@ -0,0 +1,2 @@
1
+ torch
2
+ torchvision
@@ -0,0 +1,12 @@
1
+ import pytest
2
+ import brainscore_vision
3
+
4
@pytest.mark.travis_slow
def test_resnet18_random():
    """Smoke test: the registered model loads and reports its identifier."""
    model = brainscore_vision.load_model('resnet18_random')
    assert model.identifier == 'resnet18_random'
8
+
9
+
10
+
11
+ # AssertionError: No registrations found for resnet18_random
12
+ # ⚡ master ~/vision python -m brainscore_vision score --model_identifier='resnet50_tutorial' --benchmark_identifier='MajajHong2015public.IT-pls'
@@ -0,0 +1,6 @@
1
+
2
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, get_layers

# Register the model; the lambda defers construction (including the weight
# download inside get_model) until the registry entry is actually loaded.
model_registry['resnet50_less_variation_iteration=1'] = lambda: ModelCommitment(identifier='resnet50_less_variation_iteration=1', activations_model=get_model('resnet50_less_variation_iteration=1'), layers=get_layers('resnet50_less_variation_iteration=1'))
@@ -0,0 +1,200 @@
1
+
2
+ from brainscore_vision.model_helpers.check_submission import check_models
3
+ import functools
4
+ import numpy as np
5
+ import torch
6
+ from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper
7
+ from PIL import Image
8
+ from torch import nn
9
+ import pytorch_lightning as pl
10
+ import torchvision.models as models
11
+ import gdown
12
+ import glob
13
+ import os
14
+ from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images
15
+
16
def get_bibtex(model_identifier):
    """Return a citation string for this model.

    NOTE(review): returns the literal 'VGG16' even though this submission is
    a resnet50 variant — looks like a copy-paste placeholder; confirm the
    intended BibTeX entry with the submitter.
    """
    return 'VGG16'
18
+
19
def get_model_list():
    """Return the identifiers of all models provided by this module."""
    identifiers = ['resnet50_less_variation_iteration=1']
    return identifiers
21
+
22
def get_model(name):
    """Build the Brain-Score activations model for this submission.

    Downloads the trained checkpoint, loads it into a torchvision resnet50,
    and wraps the network in a PytorchWrapper with the standard 224x224
    image preprocessing.

    :param name: must be 'resnet50_less_variation_iteration=1'.
    :return: PytorchWrapper around the loaded network.
    """
    assert name == 'resnet50_less_variation_iteration=1'
    keyword = 'less_variation'
    network = 'resnet50'
    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_1.ckpt'
    output = 'resnet50_less_variation_iteration=1.ckpt'
    # Download the checkpoint once (the original downloaded it twice).
    gdown.download(url, output)

    if keyword != 'imagenet_trained' and keyword != 'no_training':
        checkpoints = [output]
    else:
        print('keyword is imagenet')
        # 'x' is a sentinel meaning "no checkpoint to load".
        checkpoints = ['x']

    model = None
    for model_ckpt in checkpoints:
        print(model_ckpt)
        if keyword == 'imagenet_trained' and network != 'clip':
            # Unreachable for this variant; kept for template parity.
            model = torch.hub.load('pytorch/vision', network, pretrained=True)
        else:
            model = torch.hub.load('pytorch/vision', network, pretrained=False)
            if model_ckpt != 'x' and keyword != 'imagenet_trained':
                ckpt = torch.load(model_ckpt, map_location='cpu')
                # Lightning checkpoints prefix every parameter with 'model.';
                # strip that prefix so torchvision accepts the state dict.
                # BUGFIX: the original only applied the checkpoint for
                # alexnet/vgg16, so the resnet50 weights were downloaded but
                # never loaded and a randomly initialised net was returned.
                # (Assumes the resnet50 checkpoint uses the same 'model.'
                # prefix as the alexnet/vgg16 ones — same training pipeline.)
                state_dict = {}
                for key, value in ckpt['state_dict'].items():
                    state_dict[key.split('model.')[1]] = value
                model.load_state_dict(state_dict)

    preprocessing = functools.partial(load_preprocess_images, image_size=224)
    # BUGFIX: use the model identifier for the wrapper. The original iterated
    # `for name, module in model._modules.items()` which clobbered the `name`
    # parameter, naming the wrapper after the last submodule (e.g. 'fc').
    activations_model = PytorchWrapper(identifier=name, model=model,
                                       preprocessing=preprocessing)

    return activations_model
86
+
87
def get_layers(name):
    """Return the top-level layer names of the network for Brain-Score.

    The returned names depend only on the architecture, so this function
    instantiates a randomly initialised resnet50 and enumerates its
    top-level modules. BUGFIX: the original re-downloaded and loaded the
    checkpoint (which cannot change the layer names) and contained a latent
    NameError — `layers.append(...)` ran before `layers` was defined in the
    imagenet branch.

    :param name: must be 'resnet50_less_variation_iteration=1'.
    :return: list of top-level module names (e.g. 'conv1', ..., 'fc').
    """
    assert name == 'resnet50_less_variation_iteration=1'
    # Architecture only; weights are irrelevant for listing layer names,
    # and get_model() is responsible for downloading/applying the checkpoint.
    model = models.resnet50(pretrained=False)
    layers = []
    for module_name, module in model._modules.items():
        layers.append(module_name)
    return layers
143
+
144
# Script entry point: rebuild the model much as get_model() does, then run
# Brain-Score's submission checks.  NOTE(review): this duplicates the bodies
# of get_model()/get_layers() nearly verbatim; kept as-is.
if __name__ == '__main__':
    device = "cpu"
    # `global` at module scope is a no-op; retained from the template.
    global model
    global keyword
    global network
    global iteration
    # Configuration for this submission variant.
    keyword = 'less_variation'
    iteration = 1
    network = 'resnet50'
    # Fetch the trained checkpoint into the working directory.
    url = 'https://eggerbernhard.ch/shreya/latest_resnet50/less_variation_1.ckpt'
    output = 'resnet50_less_variation_iteration=1.ckpt'
    gdown.download(url, output)


    # Decide which checkpoint files to iterate over; for this variant the
    # list always contains exactly the downloaded checkpoint.
    if keyword != 'imagenet_trained' and keyword != 'no_training':
        lx_whole = [f"resnet50_less_variation_iteration=1.ckpt"]
        if len(lx_whole) > 1:
            lx_whole = [lx_whole[-1]]
    elif keyword == 'imagenet_trained' or keyword == 'no_training':
        print('keyword is imagenet')
        # 'x' is a sentinel meaning "no checkpoint to load".
        lx_whole = ['x']

    for model_ckpt in lx_whole:
        print(model_ckpt)
        last_module_name = None
        last_module = None
        layers = []
        if keyword == 'imagenet_trained' and network != 'clip':
            # Unreachable for this variant (keyword is 'less_variation').
            model = torch.hub.load('pytorch/vision', network, pretrained=True)
            for name, module in model.named_modules():
                last_module_name = name
                last_module = module
                layers.append(name)
        else:
            # Randomly initialised architecture; weights would come from the
            # checkpoint below.
            model = torch.hub.load('pytorch/vision', network, pretrained=False)
            if model_ckpt != 'x':
                ckpt = torch.load(model_ckpt, map_location='cpu')
            # NOTE(review): only the alexnet/vgg16 branches below apply the
            # checkpoint; for network == 'resnet50' the loaded `ckpt` is
            # never used — confirm whether that is intended.
            if model_ckpt != 'x' and network == 'alexnet' and keyword != 'imagenet_trained':
                # Strip the Lightning 'model.' prefix from parameter names.
                ckpt2 = {}
                for keys in ckpt['state_dict']:
                    print(keys)
                    print(ckpt['state_dict'][keys].shape)
                    print('---')
                    k2 = keys.split('model.')[1]
                    ckpt2[k2] = ckpt['state_dict'][keys]
                model.load_state_dict(ckpt2)
            if model_ckpt != 'x' and network == 'vgg16' and keyword != 'imagenet_trained':
                # Same prefix-stripping as the alexnet branch.
                ckpt2 = {}
                for keys in ckpt['state_dict']:
                    print(keys)
                    print(ckpt['state_dict'][keys].shape)
                    print('---')
                    k2 = keys.split('model.')[1]
                    ckpt2[k2] = ckpt['state_dict'][keys]
                model.load_state_dict(ckpt2)
            # Add more cases for other networks as needed
    check_models.check_base_models(__name__)
@@ -0,0 +1,6 @@
1
+ {
2
+ "V1": "maxpool",
3
+ "V2": "layer1",
4
+ "V4": "maxpool",
5
+ "IT": "layer1"
6
+ }
@@ -0,0 +1,29 @@
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging configuration for this Brain-Score model submission."""

from setuptools import setup, find_packages

# Runtime dependencies of the model plugin.
requirements = [
    "torchvision",
    "torch",
    "gdown",
    "pytorch_lightning",
    "brainscore_vision",
]

setup(
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='brain-score template',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    test_suite='tests',
)
@@ -0,0 +1,3 @@
1
+
2
+ import pytest
3
+
@@ -0,0 +1,6 @@
1
+
2
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, get_layers


def _make_commitment(identifier='resnet50_less_variation_iteration=2'):
    """Build the ModelCommitment lazily when the registry entry is resolved."""
    return ModelCommitment(identifier=identifier,
                           activations_model=get_model(identifier),
                           layers=get_layers(identifier))


model_registry['resnet50_less_variation_iteration=2'] = _make_commitment