siibra 0.4a76__py3-none-any.whl → 0.5a1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release: this version of siibra has been flagged as possibly problematic.

Files changed (34)
  1. siibra/VERSION +1 -1
  2. siibra/commons.py +3 -2
  3. siibra/configuration/configuration.py +6 -2
  4. siibra/configuration/factory.py +48 -27
  5. siibra/explorer/__init__.py +1 -0
  6. siibra/explorer/url.py +162 -0
  7. siibra/explorer/util.py +65 -0
  8. siibra/features/anchor.py +36 -9
  9. siibra/features/connectivity/__init__.py +6 -2
  10. siibra/features/connectivity/functional_connectivity.py +21 -0
  11. siibra/features/connectivity/regional_connectivity.py +91 -86
  12. siibra/features/dataset/ebrains.py +1 -1
  13. siibra/features/feature.py +331 -35
  14. siibra/features/tabular/bigbrain_intensity_profile.py +5 -2
  15. siibra/features/tabular/cell_density_profile.py +3 -1
  16. siibra/features/tabular/cortical_profile.py +14 -10
  17. siibra/features/tabular/gene_expression.py +0 -2
  18. siibra/features/tabular/layerwise_bigbrain_intensities.py +3 -2
  19. siibra/features/tabular/receptor_density_profile.py +7 -1
  20. siibra/features/tabular/regional_timeseries_activity.py +81 -102
  21. siibra/features/tabular/tabular.py +21 -9
  22. siibra/livequeries/bigbrain.py +11 -22
  23. siibra/locations/__init__.py +65 -1
  24. siibra/locations/boundingbox.py +0 -16
  25. siibra/locations/location.py +13 -0
  26. siibra/locations/pointset.py +1 -3
  27. siibra/retrieval/cache.py +5 -3
  28. siibra/retrieval/datasets.py +27 -27
  29. siibra/volumes/neuroglancer.py +6 -9
  30. {siibra-0.4a76.dist-info → siibra-0.5a1.dist-info}/METADATA +1 -1
  31. {siibra-0.4a76.dist-info → siibra-0.5a1.dist-info}/RECORD +34 -31
  32. {siibra-0.4a76.dist-info → siibra-0.5a1.dist-info}/WHEEL +1 -1
  33. {siibra-0.4a76.dist-info → siibra-0.5a1.dist-info}/LICENSE +0 -0
  34. {siibra-0.4a76.dist-info → siibra-0.5a1.dist-info}/top_level.txt +0 -0

siibra/features/tabular/regional_timeseries_activity.py CHANGED
@@ -14,26 +14,26 @@
 # limitations under the License.
 
 from . import tabular
+from ..feature import Compoundable
 
 from .. import anchor as _anchor
-
-from ...commons import logger, QUIET, siibra_tqdm
+from ...commons import QUIET
 from ...locations import pointset
 from ...retrieval.repositories import RepositoryConnector
+from ...retrieval.requests import HttpRequest
 
-from typing import Callable, Dict, List
+from typing import Callable, List
 import pandas as pd
 import numpy as np
 
 
-class RegionalTimeseriesActivity(tabular.Tabular):
+class RegionalTimeseriesActivity(tabular.Tabular, Compoundable):
     """
     Datasets that provide regional activity over time.
     """
 
-    DESCRIPTION = (
-        ""
-    )
+    _filter_attrs = ["modality", "cohort", "subject"]
+    _compound_attrs = ["modality", "cohort"]
 
     def __init__(
         self,
@@ -42,103 +42,88 @@ class RegionalTimeseriesActivity(tabular.Tabular):
         regions: list,
         connector: RepositoryConnector,
         decode_func: Callable,
-        files: Dict[str, str],
+        filename: str,
         anchor: _anchor.AnatomicalAnchor,
         timestep: str,
         description: str = "",
         datasets: list = [],
-        paradigm: str = ""
+        subject: str = "average"
     ):
         """
         """
         tabular.Tabular.__init__(
             self,
             modality=modality,
-            description=description or '\n'.join({ds.description for ds in datasets}),
+            description=description,
             anchor=anchor,
             datasets=datasets,
             data=None  # lazy loading below
         )
         self.cohort = cohort.upper()
-        self._connector = connector
-        self._files = files
+        if isinstance(connector, str) and connector:
+            self._connector = HttpRequest(connector, decode_func)
+        else:
+            self._connector = connector
+        self._filename = filename
         self._decode_func = decode_func
         self.regions = regions
-        self._tables = {}
-        self.paradigm = paradigm
-        self.timestep = timestep
+        self._table = None
+        self._subject = subject
+        val, unit = timestep.split(" ")
+        self.timestep = (float(val), unit)
 
     @property
-    def subjects(self):
-        """
-        Returns the subject identifiers for which signal tables are available.
-        """
-        return list(self._files.keys())
+    def subject(self):
+        """Returns the subject identifiers for which the matrix represents."""
+        return self._subject
 
     @property
     def name(self):
-        supername = super().name
-        return f"{supername} with paradigm {self.paradigm}"
+        return f"{super().name} with cohort {self.cohort} - subject {self.subject}"
 
-    def get_table(self, subject: str = None):
+    @property
+    def data(self) -> pd.DataFrame:
         """
-        Returns a pandas dataframe where the column headers are regions and the
-        indcies indicate disctrete timesteps.
-
-        Parameters
-        ----------
-        subject: str, default: None
-            Name of the subject (see RegionalTimeseriesActivity.subjects for available names).
-            If None, the mean is taken in case of multiple available data tables.
-        Returns
-        -------
-        pd.DataFrame
-            A table with region names as the column and timesteps as indices.
+        Returns a table as a pandas dataframe where the index is a timeseries.
         """
-        assert len(self) > 0
-        if (subject is None) and (len(self) > 1):
-            # multiple signal tables available, but no subject given - return mean table
-            logger.info(
-                f"No subject name supplied, returning mean signal table across {len(self)} subjects. "
-                "You might alternatively specify an individual subject."
-            )
-            if "mean" not in self._tables:
-                all_arrays = [
-                    self._connector.get(fname, decode_func=self._decode_func)
-                    for fname in siibra_tqdm(
-                        self._files.values(),
-                        total=len(self),
-                        desc=f"Averaging {len(self)} signal tables"
-                    )
-                ]
-                self._tables['mean'] = self._array_to_dataframe(np.stack(all_arrays).mean(0))
-            return self._tables['mean'].copy()
-        if subject is None:
-            subject = next(iter(self._files.keys()))
-        if subject not in self._files:
-            raise ValueError(f"Subject name '{subject}' not known, use one of: {', '.join(self._files)}")
-        if subject not in self._tables:
-            self._tables[subject] = self._load_table(subject)
-        return self._tables[subject].copy()
+        if self._table is None:
+            self._load_table()
+        return self._table.copy()
 
-    def _load_table(self, subject: str):
+    def _load_table(self):
         """
         Extract the timeseries table.
         """
-        assert subject in self.subjects
-        array = self._connector.get(self._files[subject], decode_func=self._decode_func)
-        return self._array_to_dataframe(array)
-
-    def __len__(self):
-        return len(self._files)
+        array = self._connector.get(self._filename, decode_func=self._decode_func)
+        if not isinstance(array, np.ndarray):
+            assert isinstance(array, pd.DataFrame)
+            array = array.to_numpy()
+        ncols = array.shape[1]
+        self._table = pd.DataFrame(
+            array,
+            index=pd.TimedeltaIndex(
+                np.arange(0, array.shape[0]) * self.timestep[0],
+                unit=self.timestep[1],
+                name="time"
+            )
+        )
+        parcellations = self.anchor.represented_parcellations()
+        assert len(parcellations) == 1
+        parc = next(iter(parcellations))
+        with QUIET:
+            columnmap = {
+                i: parc.get_region(regionname, allow_tuple=True)
+                for i, regionname in enumerate(self.regions)
+            }
+        if len(columnmap) == ncols:
+            remapper = {
+                label - min(columnmap.keys()): region
+                for label, region in columnmap.items()
+            }
+            self._table = self._table.rename(columns=remapper)
 
     def __str__(self):
-        return "{} with paradigm {} for {} from {} cohort ({} signal tables)".format(
-            self.modality, self.paradigm,
-            "_".join(p.name for p in self.anchor.parcellations),
-            self.cohort,
-            len(self._files),
-        )
+        return self.name
 
     def compute_centroids(self, space):
         """
@@ -172,31 +157,8 @@ class RegionalTimeseriesActivity(tabular.Tabular):
         )
         return result
 
-    def _array_to_dataframe(self, array: np.ndarray) -> pd.DataFrame:
-        """
-        Convert a numpy array with the regional activity data to
-        a DataFrame with regions as column headers and timesteps as indices.
-        """
-        df = pd.DataFrame(array)
-        parcellations = self.anchor.represented_parcellations()
-        assert len(parcellations) == 1
-        parc = next(iter(parcellations))
-        with QUIET:
-            indexmap = {
-                i: parc.get_region(regionname, allow_tuple=True)
-                for i, regionname in enumerate(self.regions)
-            }
-        ncols = array.shape[1]
-        if len(indexmap) == ncols:
-            remapper = {
-                label - min(indexmap.keys()): region
-                for label, region in indexmap.items()
-            }
-            df = df.rename(columns=remapper)
-        return df
-
     def plot(
-        self, subject: str = None, regions: List[str] = None, *args,
+        self, regions: List[str] = None, *args,
         backend="matplotlib", **kwargs
     ):
         """
@@ -214,12 +176,12 @@ class RegionalTimeseriesActivity(tabular.Tabular):
        if regions is None:
            regions = self.regions
        indices = [self.regions.index(r) for r in regions]
-        table = self.get_table(subject).iloc[:, indices]
+        table = self.data.iloc[:, indices]
        table.columns = [str(r) for r in table.columns]
        return table.mean().plot(kind="bar", *args, backend=backend, **kwargs)
 
    def plot_carpet(
-        self, subject: str = None, regions: List[str] = None, *args,
+        self, regions: List[str] = None, *args,
        backend="plotly", **kwargs
    ):
        """
@@ -238,12 +200,17 @@ class RegionalTimeseriesActivity(tabular.Tabular):
        if regions is None:
            regions = self.regions
        indices = [self.regions.index(r) for r in regions]
-        table = self.get_table(subject).iloc[:, indices]
+        table = self.data.iloc[:, indices].reset_index(drop=True)
        table.columns = [str(r) for r in table.columns]
+        kwargs["title"] = kwargs.get("title", f"{self.modality}" + f" for subject={self.subject}")
+        kwargs["labels"] = kwargs.get("labels", {
+            "xlabel": self.data.index.to_numpy(dtype='timedelta64[ms]')}
+        )
        from plotly.express import imshow
        return imshow(
+            *args,
            table.T,
-            title=f"{self.modality}" + f" for subject={subject}" if subject else ""
+            **kwargs
        )
 
 
@@ -256,4 +223,16 @@ class RegionalBOLD(
     Blood-oxygen-level-dependent (BOLD) signals per region.
     """
 
-    pass
+    _filter_attrs = RegionalTimeseriesActivity._filter_attrs + ["paradigm"]
+    _compound_attrs = RegionalTimeseriesActivity._compound_attrs + ["paradigm"]
+
+    def __init__(self, paradigm: str, **kwargs):
+        RegionalTimeseriesActivity.__init__(self, **kwargs)
+        self.paradigm = paradigm
+
+        # paradign is used to distinguish functional connectivity features from each other.
+        assert self.paradigm, "RegionalBOLD must have paradigm defined!"
+
+    @property
+    def name(self):
+        return f"{super().name}, paradigm {self.paradigm}"
siibra/features/tabular/tabular.py CHANGED
@@ -74,6 +74,14 @@ class Tabular(feature.Feature):
         **kwargs
             takes Matplotlib.pyplot keyword arguments
         """
+        wrapwidth = kwargs.pop("textwrap") if "textwrap" in kwargs else 40
+        kwargs["title"] = kwargs.get(
+            "title",
+            "\n".join(wrap(
+                f"{self.modality} in {', '.join({_.name for _ in self.anchor.regions})}",
+                wrapwidth
+            ))
+        )
         kwargs["kind"] = kwargs.get("kind", "bar")
         kwargs["y"] = kwargs.get("y", self.data.columns[0])
         if backend == "matplotlib":
@@ -82,8 +90,6 @@ class Tabular(feature.Feature):
             except ImportError:
                 commons.logger.error("matplotlib not available. Plotting of fingerprints disabled.")
                 return None
-
-            wrapwidth = kwargs.pop("textwrap") if "textwrap" in kwargs else 40
             # default kwargs
             if kwargs.get("error_y") is None:
                 kwargs["yerr"] = kwargs.get("yerr", 'std' if 'std' in self.data.columns else None)
@@ -93,10 +99,6 @@ class Tabular(feature.Feature):
                 "ylabel",
                 f"{kwargs['y']}{yerr_label} {self.unit if hasattr(self, 'unit') else ''}"
             )
-            kwargs["title"] = kwargs.get(
-                "title",
-                "\n".join(wrap(f"{self.modality} in {', '.join({_.name for _ in self.anchor.regions})}", wrapwidth))
-            )
             kwargs["grid"] = kwargs.get("grid", True)
             kwargs["legend"] = kwargs.get("legend", False)
             ax = self.data.plot(*args, backend=backend, **kwargs)
@@ -105,14 +107,24 @@ class Tabular(feature.Feature):
             plt.tight_layout()
             return ax
         elif backend == "plotly":
+            kwargs["title"] = kwargs["title"].replace("\n", "<br>")
             kwargs["error_y"] = kwargs.get("error_y", 'std' if 'std' in self.data.columns else None)
             error_y_label = f" &plusmn; {kwargs.get('error_y')}" if kwargs.get('error_y') else ''
             kwargs["labels"] = {
-                "index": kwargs.pop("xlabel", ""),
-                "value": kwargs.pop("ylabel", f"{kwargs.get('y')}{error_y_label} {self.unit if hasattr(self, 'unit') else ''}")
+                "index": kwargs.pop("xlabel", None) or kwargs.pop("index", ""),
+                "value": kwargs.pop("ylabel", None) or kwargs.pop(
+                    "value",
+                    f"{kwargs.get('y')}{error_y_label} {self.unit if hasattr(self, 'unit') else ''}"
+                )
             }
             fig = self.data.plot(*args, backend=backend, **kwargs)
-            fig.update_layout(yaxis_title=kwargs["labels"]['value'])
+            fig.update_layout(
+                yaxis_title=kwargs["labels"]['value'],
+                title=dict(
+                    automargin=True, yref="container", xref="container",
+                    pad=dict(t=40), xanchor="left", yanchor="top"
+                )
+            )
             return fig
         else:
             return self.data.plot(*args, backend=backend, **kwargs)
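
The plotting hunks move title generation ahead of the backend split, so both matplotlib and plotly now get the same wrapped default title (the plotly branch converts the line breaks to <br> and anchors the title via update_layout), and the plotly branch accepts xlabel/ylabel aliases next to plotly's native index/value label keys. A brief sketch, assuming f is any Tabular feature:

    # matplotlib: wrap the auto-generated title at 30 characters instead of the default 40
    ax = f.plot(textwrap=30)

    # plotly: same default title ("\n" becomes "<br>"), custom y-axis label via the alias
    fig = f.plot(backend="plotly", ylabel="receptor density")
    fig.show()
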
siibra/livequeries/bigbrain.py CHANGED
@@ -29,15 +29,10 @@ from os import path
 
 class WagstylProfileLoader:
 
-    REPO = "https://github.com/kwagstyl/cortical_layers_tutorial"
-    BRANCH = "main"
-    PROFILES_FILE_LEFT = "https://data-proxy.ebrains.eu/api/v1/public/buckets/d-26d25994-634c-40af-b88f-2a36e8e1d508/profiles/profiles_left.txt"
-    PROFILES_FILE_RIGHT = "https://data-proxy.ebrains.eu/api/v1/public/buckets/d-26d25994-634c-40af-b88f-2a36e8e1d508/profiles/profiles_right.txt"
+    REPO = "https://github.com/kwagstyl/cortical_layers_tutorial/raw/main"
+    PROFILES_FILE_LEFT = "data/profiles_left.npy"
     THICKNESSES_FILE_LEFT = "data/thicknesses_left.npy"
-    THICKNESSES_FILE_RIGHT = ""
-    MESH_FILE_LEFT = "gray_left_327680.surf.gii"
-    MESH_FILE_RIGHT = "gray_right_327680.surf.gii"
-    BASEURL = "https://ftp.bigbrainproject.org/bigbrain-ftp/BigBrainRelease.2015/3D_Surfaces/Apr7_2016/gii/"
+    MESH_FILE_LEFT = "data/gray_left_327680.surf.gii"
     _profiles = None
     _vertices = None
     _boundary_depths = None
@@ -53,31 +48,25 @@ class WagstylProfileLoader:
     @classmethod
     def _load(cls):
         # read thicknesses, in mm, and normalize by their last column which is the total thickness
-        thickness_left = requests.HttpRequest(f"{cls.REPO}/raw/{cls.BRANCH}/{cls.THICKNESSES_FILE_LEFT}").data.T
-        thickness_right = np.zeros(shape=thickness_left.shape)  # TODO: replace with thickness data for te right hemisphere
-        thickness = np.concatenate((thickness_left[:, :-1], thickness_right[:, :-1]))  # last column is the computed total thickness
-        total_thickness = thickness.sum(1)
+        thickness = requests.HttpRequest(f"{cls.REPO}/{cls.THICKNESSES_FILE_LEFT}").data.T
+        total_thickness = thickness[:, :-1].sum(1)  # last column is the computed total thickness
         valid = np.where(total_thickness > 0)[0]
         cls._boundary_depths = np.c_[np.zeros_like(valid), (thickness[valid, :] / total_thickness[valid, None]).cumsum(1)]
         cls._boundary_depths[:, -1] = 1  # account for float calculation errors
 
         # find profiles with valid thickness
-        if not all(
-            path.exists(cache.CACHE.build_filename(url))
-            for url in [cls.PROFILES_FILE_LEFT, cls.PROFILES_FILE_RIGHT]
-        ):
+        profile_l_url = f"{cls.REPO}/{cls.PROFILES_FILE_LEFT}"
+        if not path.exists(cache.CACHE.build_filename(profile_l_url)):
             logger.info(
                 "First request to BigBrain profiles. Preprocessing the data "
                 "now. This may take a little."
             )
-        profiles_l = requests.HttpRequest(cls.PROFILES_FILE_LEFT).data.to_numpy()
-        profiles_r = requests.HttpRequest(cls.PROFILES_FILE_RIGHT).data.to_numpy()
-        cls._profiles = np.concatenate((profiles_l, profiles_r))[valid, :]
+        profiles_l_all = requests.HttpRequest(profile_l_url).data
+        cls._profiles = profiles_l_all[valid, :]
 
         # read mesh vertices
-        mesh_left = requests.HttpRequest(f"{cls.BASEURL}/{cls.MESH_FILE_LEFT}").data
-        mesh_right = requests.HttpRequest(f"{cls.BASEURL}/{cls.MESH_FILE_RIGHT}").data
-        mesh_vertices = np.concatenate((mesh_left.darrays[0].data, mesh_right.darrays[0].data))
+        mesh_left = requests.HttpRequest(f"{cls.REPO}/{cls.MESH_FILE_LEFT}").data
+        mesh_vertices = mesh_left.darrays[0].data
         cls._vertices = mesh_vertices[valid, :]
 
         logger.debug(f"{cls._profiles.shape[0]} BigBrain intensity profiles.")
siibra/locations/__init__.py CHANGED
@@ -14,7 +14,71 @@
 # limitations under the License.
 """Handles spatial concepts and spatial operation like warping between spaces."""
 
-from .location import WholeBrain
+from .location import WholeBrain, Location
 from .point import Point
 from .pointset import PointSet
 from .boundingbox import BoundingBox
+
+
+def reassign_union(loc0: 'Location', loc1: 'Location') -> 'Location':
+    """
+    Add two locations of same or diffrent type to find their union as a
+    Location object.
+    Note
+    ----
+    `loc1` will be warped to `loc0` they are not in the same space.
+    Parameters
+    ----------
+    loc0 : Location
+        _description_
+    loc1 : Location
+        _description_
+    Returns
+    -------
+    Location
+        - Point U Point = PointSet
+        - Point U PointSet = PointSet
+        - PointSet U PointSet = PointSet
+        - BoundingBox U BoundingBox = BoundingBox
+        - BoundingBox U PointSet = BoundingBox
+        - BoundingBox U Point = BoundingBox
+        - WholeBrain U Location = NotImplementedError
+        (all operations are commutative)
+    """
+    if loc0 is None or loc1 is None:
+        return loc0 or loc1
+
+    if isinstance(loc0, WholeBrain) or isinstance(loc1, WholeBrain):
+        raise NotImplementedError("Union of WholeBrains is not yet implemented.")
+
+    loc1_w = loc1.warp(loc0.space)  # adopt the space of the first location
+
+    if isinstance(loc0, Point):  # turn Points to PointSets
+        return reassign_union(
+            PointSet([loc0], space=loc0.space, sigma_mm=loc0.sigma), loc1_w
+        )
+
+    if isinstance(loc0, PointSet):
+        if isinstance(loc1_w, PointSet):
+            points = set(loc0.points + loc1_w.points)
+            return PointSet(
+                points,
+                space=loc0.space,
+                sigma_mm=[p.sigma for p in points],
+            )
+        if isinstance(loc1_w, BoundingBox):
+            return reassign_union(loc0.boundingbox, loc1_w)
+
+    if isinstance(loc0, BoundingBox) and isinstance(loc1_w, BoundingBox):
+        points = [loc0.minpoint, loc0.maxpoint, loc1_w.minpoint, loc1_w.maxpoint]
+        return BoundingBox(
+            point1=[min(p[i] for p in points) for i in range(3)],
+            point2=[max(p[i] for p in points) for i in range(3)],
+            space=loc0.space,
+            sigma_mm=[loc0.minpoint.sigma, loc0.maxpoint.sigma]
+        )
+
+    return reassign_union(loc1_w, loc0)
+
+
+Location.union = reassign_union
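
With Location.union = reassign_union, every location type gains a commutative union: callable statically as Location.union(a, b) or as a bound method a.union(b), replacing the BoundingBox.union method removed below. A short sketch; the space key and constructor arguments are assumed from the surrounding code, not shown in this diff:

    import siibra
    from siibra.locations import Location, Point, PointSet, BoundingBox

    mni = siibra.spaces["mni152"]                  # illustrative space key
    p0 = Point((0.0, 0.0, 0.0), space=mni)
    p1 = Point((10.0, 5.0, 2.0), space=mni)

    ps = Location.union(p0, p1)                    # Point U Point -> PointSet
    box = BoundingBox((0, 0, 0), (5, 5, 5), space=mni)
    merged = box.union(ps)                         # BoundingBox U PointSet -> BoundingBox
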
siibra/locations/boundingbox.py CHANGED
@@ -273,22 +273,6 @@ class BoundingBox(location.Location):
             .boundingbox
         )
 
-    def union(self, other):
-        """
-        Computes the union of this boudning box with another one.
-
-        Args:
-            other (BoundingBox): Another bounding box
-        """
-        warped = other.warp(self.space)
-        points = [self.minpoint, self.maxpoint, warped.minpoint, warped.maxpoint]
-        return BoundingBox(
-            point1=[min(p[i] for p in points) for i in range(3)],
-            point2=[max(p[i] for p in points) for i in range(3)],
-            space=self.space,
-            sigma_mm=[self.minpoint.sigma, self.maxpoint.sigma]
-        )
-
     def clip(self, xyzmax, xyzmin=(0, 0, 0)):
         """
         Returns a new bounding box obtained by clipping at the given maximum coordinate.
siibra/locations/location.py CHANGED
@@ -98,6 +98,19 @@ class Location(ABC):
             f"[{','.join(str(l) for l in iter(self))}]"
         )
 
+    def __repr__(self):
+        return f"{self.__class__.__name__}: {self}"
+
+    @staticmethod
+    def union(loc0: 'Location', loc1: 'Location') -> 'Location':
+        """
+        Reassigned at the locations module level for static typing and to avoid
+        circular imports. See siibra.locations.__init__.reassign_union()
+        """
+        raise NotImplementedError(
+            "This method is designed to be reassigned at the module level"
+        )
+
 
 class WholeBrain(Location):
     """
siibra/locations/pointset.py CHANGED
@@ -159,9 +159,7 @@ class PointSet(location.Location):
         return len(self.points)
 
     def __str__(self):
-        return f"Set of points {self.space.name}: " + ", ".join(
-            f"({','.join(str(v) for v in p)})" for p in self
-        )
+        return f"Set of {len(self)} points in the {self.boundingbox}"
 
     @property
     def boundingbox(self):
siibra/retrieval/cache.py CHANGED
@@ -19,7 +19,7 @@ import os
 from appdirs import user_cache_dir
 import tempfile
 
-from ..commons import logger, SIIBRA_CACHEDIR
+from ..commons import logger, SIIBRA_CACHEDIR, SKIP_CACHEINIT_MAINTENANCE
 
 
 def assert_folder(folder):
@@ -64,7 +64,10 @@ class Cache:
             cls.folder = SIIBRA_CACHEDIR
         cls.folder = assert_folder(cls.folder)
         cls._instance = cls.__new__(cls)
-        cls._instance.run_maintenance()
+        if SKIP_CACHEINIT_MAINTENANCE:
+            logger.debug("Will not run maintenance on cache as SKIP_CACHE_MAINTENANCE is set to True.")
+        else:
+            cls._instance.run_maintenance()
         return cls._instance
 
     def clear(self):
@@ -77,7 +80,6 @@ class Cache:
     def run_maintenance(self):
         """ Shrinks the cache by deleting oldest files first until the total size
         is below cache size (Cache.SIZE) given in GiB."""
-
         # build sorted list of cache files and their os attributes
         files = [os.path.join(self.folder, fname) for fname in os.listdir(self.folder)]
         sfiles = sorted([(fn, os.stat(fn)) for fn in files], key=lambda t: t[1].st_atime)
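
SKIP_CACHEINIT_MAINTENANCE only defers the size check from import time; the diff does not show how the flag is populated in siibra.commons (note the debug message calls it SKIP_CACHE_MAINTENANCE, an apparent naming inconsistency). Maintenance can still be triggered by hand on the module-level singleton that this diff references elsewhere (cache.CACHE.build_filename in siibra/livequeries/bigbrain.py):

    from siibra.retrieval.cache import CACHE

    print(CACHE.folder)      # resolved cache directory (honours SIIBRA_CACHEDIR)
    CACHE.run_maintenance()  # delete oldest files until total size is below Cache.SIZE GiB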