siibra 0.5a2__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of siibra might be problematic.

Files changed (83)
  1. siibra/VERSION +1 -1
  2. siibra/__init__.py +20 -12
  3. siibra/commons.py +145 -90
  4. siibra/configuration/__init__.py +1 -1
  5. siibra/configuration/configuration.py +22 -17
  6. siibra/configuration/factory.py +177 -128
  7. siibra/core/__init__.py +1 -8
  8. siibra/core/{relation_qualification.py → assignment.py} +17 -14
  9. siibra/core/atlas.py +66 -35
  10. siibra/core/concept.py +81 -39
  11. siibra/core/parcellation.py +83 -67
  12. siibra/core/region.py +569 -263
  13. siibra/core/space.py +7 -39
  14. siibra/core/structure.py +111 -0
  15. siibra/exceptions.py +63 -0
  16. siibra/experimental/__init__.py +19 -0
  17. siibra/experimental/contour.py +61 -0
  18. siibra/experimental/cortical_profile_sampler.py +57 -0
  19. siibra/experimental/patch.py +98 -0
  20. siibra/experimental/plane3d.py +256 -0
  21. siibra/explorer/__init__.py +16 -0
  22. siibra/explorer/url.py +112 -52
  23. siibra/explorer/util.py +31 -9
  24. siibra/features/__init__.py +73 -8
  25. siibra/features/anchor.py +75 -196
  26. siibra/features/connectivity/__init__.py +1 -1
  27. siibra/features/connectivity/functional_connectivity.py +2 -2
  28. siibra/features/connectivity/regional_connectivity.py +99 -10
  29. siibra/features/connectivity/streamline_counts.py +1 -1
  30. siibra/features/connectivity/streamline_lengths.py +1 -1
  31. siibra/features/connectivity/tracing_connectivity.py +1 -1
  32. siibra/features/dataset/__init__.py +1 -1
  33. siibra/features/dataset/ebrains.py +3 -3
  34. siibra/features/feature.py +219 -110
  35. siibra/features/image/__init__.py +1 -1
  36. siibra/features/image/image.py +21 -13
  37. siibra/features/image/sections.py +1 -1
  38. siibra/features/image/volume_of_interest.py +1 -1
  39. siibra/features/tabular/__init__.py +1 -1
  40. siibra/features/tabular/bigbrain_intensity_profile.py +24 -13
  41. siibra/features/tabular/cell_density_profile.py +111 -69
  42. siibra/features/tabular/cortical_profile.py +82 -16
  43. siibra/features/tabular/gene_expression.py +117 -6
  44. siibra/features/tabular/layerwise_bigbrain_intensities.py +7 -9
  45. siibra/features/tabular/layerwise_cell_density.py +9 -24
  46. siibra/features/tabular/receptor_density_fingerprint.py +11 -6
  47. siibra/features/tabular/receptor_density_profile.py +12 -15
  48. siibra/features/tabular/regional_timeseries_activity.py +74 -18
  49. siibra/features/tabular/tabular.py +17 -8
  50. siibra/livequeries/__init__.py +1 -7
  51. siibra/livequeries/allen.py +139 -77
  52. siibra/livequeries/bigbrain.py +104 -128
  53. siibra/livequeries/ebrains.py +7 -4
  54. siibra/livequeries/query.py +1 -2
  55. siibra/locations/__init__.py +32 -25
  56. siibra/locations/boundingbox.py +153 -127
  57. siibra/locations/location.py +45 -80
  58. siibra/locations/point.py +97 -83
  59. siibra/locations/pointcloud.py +349 -0
  60. siibra/retrieval/__init__.py +1 -1
  61. siibra/retrieval/cache.py +107 -13
  62. siibra/retrieval/datasets.py +9 -14
  63. siibra/retrieval/exceptions/__init__.py +2 -1
  64. siibra/retrieval/repositories.py +147 -53
  65. siibra/retrieval/requests.py +64 -29
  66. siibra/vocabularies/__init__.py +2 -2
  67. siibra/volumes/__init__.py +7 -9
  68. siibra/volumes/parcellationmap.py +396 -253
  69. siibra/volumes/providers/__init__.py +20 -0
  70. siibra/volumes/providers/freesurfer.py +113 -0
  71. siibra/volumes/{gifti.py → providers/gifti.py} +29 -18
  72. siibra/volumes/{neuroglancer.py → providers/neuroglancer.py} +204 -92
  73. siibra/volumes/{nifti.py → providers/nifti.py} +64 -44
  74. siibra/volumes/providers/provider.py +107 -0
  75. siibra/volumes/sparsemap.py +159 -260
  76. siibra/volumes/volume.py +720 -152
  77. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/METADATA +25 -28
  78. siibra-1.0.0a1.dist-info/RECORD +84 -0
  79. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/WHEEL +1 -1
  80. siibra/locations/pointset.py +0 -198
  81. siibra-0.5a2.dist-info/RECORD +0 -74
  82. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/LICENSE +0 -0
  83. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/top_level.txt +0 -0
siibra/volumes/volume.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2018-2021
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,27 +13,86 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """A specific mesh or 3D array."""
+
+from .providers import provider as _provider
+
 from .. import logger
 from ..retrieval import requests
-from ..locations import boundingbox as _boundingbox
-from ..core import space
-
-import nibabel as nib
-from abc import ABC, abstractmethod
+from ..core import space as _space, structure
+from ..locations import point, pointcloud, boundingbox
+from ..commons import resample_img_to_img, siibra_tqdm, affine_scaling, connected_components
+from ..exceptions import NoMapAvailableError, SpaceWarpingFailedError, EmptyPointCloudError
+
+from dataclasses import dataclass
+from nibabel import Nifti1Image
+import numpy as np
 from typing import List, Dict, Union, Set, TYPE_CHECKING
-import json
 from time import sleep
+import json
+from skimage import feature as skimage_feature, filters
+from functools import lru_cache
 
 if TYPE_CHECKING:
     from ..retrieval.datasets import EbrainsDataset
     TypeDataset = EbrainsDataset
 
 
-class ColorVolumeNotSupported(NotImplementedError):
-    pass
+@dataclass
+class ComponentSpatialProperties:
+    """
+    Centroid and nonzero volume of an image.
+    """
+    centroid: point.Point
+    volume: int
 
+    @staticmethod
+    def compute_from_image(
+        img: Nifti1Image,
+        space: Union[str, "_space.Space"],
+        split_components: bool = True
 
-class Volume:
+    ) -> List["ComponentSpatialProperties"]:
+        """
+        Find the center of an image in its (non-zero) voxel space and and its
+        volume.
+
+        Parameters
+        ----------
+        img: Nifti1Image
+        space: str, Space
+        split_components: bool, default: True
+            If True, finds the spatial properties for each connected component
+            found by skimage.measure.label.
+        """
+        scale = affine_scaling(img.affine)
+        if split_components:
+            iter_components = lambda img: connected_components(
+                np.asanyarray(img.dataobj),
+                connectivity=None
+            )
+        else:
+            iter_components = lambda img: [(0, np.asanyarray(img.dataobj))]
+
+        spatial_props: List[ComponentSpatialProperties] = []
+        for _, component in iter_components(img):
+            nonzero: np.ndarray = np.c_[np.nonzero(component)]
+            spatial_props.append(
+                ComponentSpatialProperties(
+                    centroid=point.Point(
+                        np.dot(img.affine, np.r_[nonzero.mean(0), 1])[:3],
+                        space=space
+                    ),
+                    volume=nonzero.shape[0] * scale,
+                )
+            )
+
+        # sort by volume
+        spatial_props.sort(key=lambda cmp: cmp.volume, reverse=True)
+
+        return spatial_props
+
+
+class Volume(structure.BrainStructure):
     """
     A volume is a specific mesh or 3D array,
     which can be accessible via multiple providers in different formats.
@@ -49,7 +108,9 @@ class Volume:
         "neuroglancer/precompmesh",
         "neuroglancer/precompmesh/surface",
         "gii-mesh",
-        "gii-label"
+        "gii-label",
+        "freesurfer-annot",
+        "zip/freesurfer-annot",
     ]
 
     SUPPORTED_FORMATS = IMAGE_FORMATS + MESH_FORMATS
@@ -62,32 +123,47 @@ class Volume:
         "nii": ["nii", "zip/nii"]
     }
 
+    _FETCH_CACHE = {}  # we keep a cache of the most recently fetched volumes
+    _FETCH_CACHE_MAX_ENTRIES = 3
+
     def __init__(
         self,
         space_spec: dict,
-        providers: List['VolumeProvider'],
+        providers: List['_provider.VolumeProvider'],
         name: str = "",
         variant: str = None,
         datasets: List['TypeDataset'] = [],
+        bbox: "boundingbox.BoundingBox" = None
     ):
-        self._name_cached = name  # see lazy implementation below
+        self._name = name
         self._space_spec = space_spec
         self.variant = variant
-        self._providers: Dict[str, 'VolumeProvider'] = {}
+        self._providers: Dict[str, _provider.VolumeProvider] = {}
         self.datasets = datasets
+        self._boundingbox = bbox
         for provider in providers:
             srctype = provider.srctype
             assert srctype not in self._providers
             self._providers[srctype] = provider
         if len(self._providers) == 0:
-            logger.debug(f"No provider for volume {self}")
+            logger.debug(f"No provider for volume {name}")
+
+    def __hash__(self):
+        return super().__hash__()
+
+    def __eq__(self, other: 'Volume'):
+        return (
+            isinstance(other, Volume)
+            and self.name == other.name
+            and self.space == other.space
+            and self.variant == other.variant
+            and self._providers == other._providers
+        )
 
     @property
     def name(self):
-        """
-        Allows derived classes to implement a lazy name specification.
-        """
-        return self._name_cached
+        """Allows derived classes to implement a lazy name specification."""
+        return self._name
 
     @property
     def providers(self):
@@ -100,28 +176,71 @@ class Volume:
             for srctype, prov in self._providers.items()
         }
 
-    @property
-    def boundingbox(self):
-        for provider in self._providers.values():
+    @lru_cache(2)
+    def get_boundingbox(self, clip: bool = True, background: float = 0.0, **fetch_kwargs) -> "boundingbox.BoundingBox":
+        """
+        Obtain the bounding box in physical coordinates of this volume.
+
+        Parameters
+        ----------
+        clip : bool, default: True
+            Whether to clip the background of the volume.
+        background : float, default: 0.0
+            The background value to clip.
+            Note
+            ----
+            To use it, clip must be True.
+        fetch_kwargs:
+            key word arguments that are used for fetchin volumes,
+            such as voi or resolution_mm. Currently, only possible for
+            Neuroglancer volumes except for `format`.
+
+        Raises
+        ------
+        RuntimeError
+            If the volume provider does not have a bounding box calculator.
+        """
+        if self._boundingbox is not None and len(fetch_kwargs) == 0:
+            return self._boundingbox
+
+        if not self.provides_image:
+            raise NotImplementedError("Bounding box calculation of meshes is not implemented yet.")
+
+        if clip:  # clippin requires fetching the image
+            img = self.fetch(**fetch_kwargs)
+            assert isinstance(img, Nifti1Image)
+            return boundingbox.from_array(
+                array=np.asanyarray(img.dataobj),
+                background=background,
+            ).transform(img.affine, space=self.space)
+
+        # if clipping is not required, providers migth have methods of creating
+        # bounding boxes without fetching the image
+        fmt = fetch_kwargs.get("format")
+        if (fmt is not None) and (fmt not in self.formats):
+            raise ValueError(
+                f"Requested format {fmt} is not available as provider of "
+                "this volume. See `volume.formats` for possible options."
+            )
+        providers = [self._providers[fmt]] if fmt else self._providers.values()
+        for provider in providers:
             try:
-                bbox = provider.boundingbox
-                if bbox.space is None:  # provider does usually not know the space!
-                    bbox.space = self.space
-                    bbox.minpoint.space = self.space
-                    bbox.maxpoint.space = self.space
-            except NotImplementedError as e:
-                print(str(e))
+                assert clip is False
+                bbox = provider.get_boundingbox(
+                    background=background, **fetch_kwargs
+                )
+                if bbox.space is None:  # provider do not know the space!
+                    bbox._space_cached = self.space
+                    bbox.minpoint._space_cached = self.space
+                    bbox.maxpoint._space_cached = self.space
+            except NotImplementedError:
                 continue
             return bbox
         raise RuntimeError(f"No bounding box specified by any volume provider of {str(self)}")
 
     @property
     def formats(self) -> Set[str]:
-        result = set()
-        for fmt in self._providers:
-            result.add(fmt)
-            result.add('mesh' if fmt in self.MESH_FORMATS else 'image')
-        return result
+        return {fmt for fmt in self._providers}
 
     @property
     def provides_mesh(self):
@@ -147,17 +266,168 @@ class Volume:
     def space(self):
         for key in ["@id", "name"]:
             if key in self._space_spec:
-                return space.Space.get_instance(self._space_spec[key])
-        return space.Space(None, "Unspecified space", species=space.Species.UNSPECIFIED_SPECIES)
+                return _space.Space.get_instance(self._space_spec[key])
+        return _space.Space(None, "Unspecified space", species=_space.Species.UNSPECIFIED_SPECIES)
+
+    @property
+    def species(self):
+        s = self.space
+        return None if s is None else s.species
 
     def __str__(self):
-        if self.space is None:
-            return f"{self.__class__.__name__} '{self.name}'"
-        else:
-            return f"{self.__class__.__name__} '{self.name}' in space '{self.space.name}'"
+        return (
+            f"{self.__class__.__name__} {f'{self.name}' if self.name else ''}"
+            f"{f' in space {self.space.name}' if self.space else ''}"
+        )
 
     def __repr__(self):
-        return self.__str__()
+        return (
+            f"<{self.__class__.__name__}(space_spec={self._space_spec}, "
+            f"name='{self.name}', providers={self._providers})>"
+        )
+
+    def evaluate_points(
+        self,
+        points: Union['point.Point', 'pointcloud.PointCloud'],
+        outside_value: Union[int, float] = 0,
+        **fetch_kwargs
+    ) -> np.ndarray:
+        """
+        Evaluate the image at the positions of the given points.
+
+        Note
+        ----
+        Uses nearest neighbor interpolation. Other interpolation schemes are not
+        yet implemented.
+
+        Note
+        ----
+        If points are not on the same space as the map, they will be warped to
+        the space of the volume.
+
+        Parameters
+        ----------
+        points: PointCloud
+        outside_value: int, float. Default: 0
+        fetch_kwargs: dict
+            Any additional arguments are passed to the `fetch()` call for
+            retrieving the image data.
+
+        Returns
+        -------
+        values: numpy.ndarray
+            The values of the volume at the voxels points correspond to.
+
+        Raises
+        ------
+        SpaceWarpingFailedError
+            If warping of the points fails.
+        """
+        if not self.provides_image:
+            raise NotImplementedError("Filtering of points by pure mesh volumes not yet implemented.")
+
+        # make sure the points are in the same physical space as this volume
+        as_pointcloud = pointcloud.from_points([points]) if isinstance(points, point.Point) else points
+        warped = as_pointcloud.warp(self.space)
+        assert warped is not None, SpaceWarpingFailedError
+
+        # get the voxel array of this volume
+        img = self.fetch(format='image', **fetch_kwargs)
+        arr = np.asanyarray(img.dataobj)
+
+        # transform the points to the voxel space of the volume for extracting values
+        phys2vox = np.linalg.inv(img.affine)
+        voxels = warped.transform(phys2vox, space=None)
+        XYZ = voxels.coordinates.astype('int')
+
+        # temporarily set all outside voxels to (0,0,0) so that the index access doesn't fail
+        inside = np.all((XYZ < arr.shape) & (XYZ > 0), axis=1)
+        XYZ[~inside, :] = 0
+
+        # read out the values
+        X, Y, Z = XYZ.T
+        values = arr[X, Y, Z]
+
+        # fix the outside voxel values, which might have an inconsistent value now
+        values[~inside] = outside_value
+
+        return values
+
+    def _points_inside(
+        self,
+        points: Union['point.Point', 'pointcloud.PointCloud'],
+        keep_labels: bool = True,
+        outside_value: Union[int, float] = 0,
+        **fetch_kwargs
+    ) -> 'pointcloud.PointCloud':
+        """
+        Reduce a pointcloud to the points which fall inside nonzero pixels of
+        this map.
+
+
+        Paramaters
+        ----------
+        points: PointCloud
+        keep_labels: bool
+            If False, the returned PointCloud will be labeled with their indices
+            in the original PointCloud.
+        fetch_kwargs: dict
+            Any additional arguments are passed to the `fetch()` call for
+            retrieving the image data.
+
+        Returns
+        -------
+        PointCloud
+            A new PointCloud containing only the points inside the volume.
+            Labels reflect the indices of the original points if `keep_labels`
+            is False.
+        """
+        ptset = pointcloud.from_points([points]) if isinstance(points, point.Point) else points
+        values = self.evaluate_points(ptset, outside_value=outside_value, **fetch_kwargs)
+        inside = list(np.where(values != outside_value)[0])
+        return pointcloud.from_points(
+            [ptset[i] for i in inside],
+            newlabels=None if keep_labels else inside
+        )
+
+    def intersection(self, other: structure.BrainStructure, **fetch_kwargs) -> structure.BrainStructure:
+        """
+        Compute the intersection of a location with this volume. This will
+        fetch actual image data. Any additional arguments are passed to fetch.
+        """
+        if isinstance(other, (pointcloud.PointCloud, point.Point)):
+            try:
+                points_inside = self._points_inside(other, keep_labels=False, **fetch_kwargs)
+            except EmptyPointCloudError:
+                return None  # BrainStructure.intersects checks for not None
+            if isinstance(other, point.Point):  # preserve the type
+                return points_inside[0]
+            return points_inside
+        elif isinstance(other, boundingbox.BoundingBox):
+            return self.get_boundingbox(clip=True, background=0.0, **fetch_kwargs).intersection(other)
+        elif isinstance(other, Volume):
+            if self.space != other.space:
+                raise NotImplementedError("Cannot intersect volumes from different spaces. Try comparing their boudning boxes.")
+            format = fetch_kwargs.pop('format', 'image')
+            v1 = self.fetch(format=format, **fetch_kwargs)
+            v2 = other.fetch(format=format, **fetch_kwargs)
+            arr1 = np.asanyarray(v1.dataobj)
+            arr2 = np.asanyarray(resample_img_to_img(v2, v1).dataobj)
+            pointwise_min = np.minimum(arr1, arr2)
+            if np.any(pointwise_min):
+                return from_array(
+                    data=pointwise_min,
+                    affine=v1.affine,
+                    space=self.space,
+                    name=f"Intersection between {self} and {other} computed as their pointwise minimum"
+                )
+            else:
+                return None
+        else:  # other BrainStructures should have intersection with locations implemented.
+            try:
+                return other.intersection(self)
+            except NoMapAvailableError:
+                return None
 
     def fetch(
         self,
@@ -181,144 +451,442 @@
 
         Returns
         -------
-        An image or mesh
+        An image (Nifti1Image) or mesh (Dict['verts': ndarray, 'faces': ndarray, 'labels': ndarray])
         """
+        kwargs_serialized = json.dumps({k: hash(v) for k, v in kwargs.items()}, sort_keys=True)
+
+        if "resolution_mm" in kwargs and format is None:
+            if 'neuroglancer/precomputed' not in self.formats:
+                raise ValueError("'resolution_mm' is only available for volumes with 'neuroglancer/precomputed' formats.")
+            format = 'neuroglancer/precomputed'
 
         if format is None:
-            requested_formats = self.SUPPORTED_FORMATS
+            # preseve fetch order in SUPPORTED_FORMATS
+            possible_formats = [f for f in self.SUPPORTED_FORMATS if f in self.formats]
         elif format in self._FORMAT_LOOKUP:  # allow use of aliases
-            requested_formats = self._FORMAT_LOOKUP[format]
+            possible_formats = [f for f in self._FORMAT_LOOKUP[format] if f in self.formats]
         elif format in self.SUPPORTED_FORMATS:
-            requested_formats = [format]
+            possible_formats = [format] if format in self.formats else []
         else:
-            raise ValueError(f"Invalid format requested: {format}")
-
-        # select the first source unless the user specifically requests a format
-        for fmt in requested_formats:
-            if fmt in self.formats:
-                selected_format = fmt
-                logger.debug(f"Requested format was '{format}', selected format is '{selected_format}'")
+            possible_formats = []
+        if len(possible_formats) == 0:
+            raise ValueError(
+                f"Invalid format requested: {format}. Possible values for this "
+                f"volume are: {self.formats}"
+            )
+
+        # ensure the voi is inside the template
+        voi = kwargs.get("voi", None)
+        if voi is not None and voi.space is not None:
+            assert isinstance(voi, boundingbox.BoundingBox)
+            tmplt_bbox = voi.space.get_template().get_boundingbox(clip=False)
+            intersection_bbox = voi.intersection(tmplt_bbox)
+            if intersection_bbox is None:
+                raise RuntimeError(f"voi provided ({voi}) lies out side the voxel space of the {voi.space.name} template.")
+            if intersection_bbox != voi:
+                logger.info(
+                    f"Since provided voi lies outside the template ({voi.space}) it is clipped as: {intersection_bbox}"
+                )
+                kwargs["voi"] = intersection_bbox
+
+        result = None
+        # try each possible format
+        for fmt in possible_formats:
+            fetch_hash = hash((hash(self), hash(fmt), hash(kwargs_serialized)))
+            # cached
+            if fetch_hash in self._FETCH_CACHE:
+                break
+            # Repeat in case of too many requests only
+            fwd_args = {k: v for k, v in kwargs.items() if k != "format"}
+            for try_count in range(6):
+                try:
+                    if fmt in ["gii-label", "freesurfer-annot", "zip/freesurfer-annot"]:
+                        tpl = self.space.get_template(variant=kwargs.get('variant'))
+                        mesh = tpl.fetch(**kwargs)
+                        labels = self._providers[fmt].fetch(**fwd_args)
+                        result = dict(**mesh, **labels)
+                    else:
+                        result = self._providers[fmt].fetch(**fwd_args)
+                except requests.SiibraHttpRequestError as e:
+                    if e.status_code == 429:  # too many requests
+                        sleep(0.1)
+                        logger.error(f"Cannot access {self._providers[fmt]}", exc_info=(try_count == 5))
+                        continue
+                    else:
+                        break
+                except Exception as e:
+                    logger.info(e, exc_info=1)
+                    break
+                else:
+                    break
+            # udpate the cache if fetch is successful
+            if result is not None:
+                self._FETCH_CACHE[fetch_hash] = result
+                while len(self._FETCH_CACHE) >= self._FETCH_CACHE_MAX_ENTRIES:
+                    # remove oldest entry
+                    self._FETCH_CACHE.pop(next(iter(self._FETCH_CACHE)))
                 break
         else:
-            raise ValueError(f"Invalid format requested: {format}")
+            # unsuccessful: do not poison the cache if none fetched
+            logger.error(f"Could not fetch any formats from {possible_formats}.")
+            return None
 
-        # try the selected format only
-        for try_count in range(6):
-            try:
-                if selected_format == "gii-label":
-                    tpl = self.space.get_template(variant=kwargs.get('variant'))
-                    mesh = tpl.fetch(**kwargs)
-                    labels = self._providers[selected_format].fetch(**kwargs)
-                    return dict(**mesh, **labels)
-                else:
-                    return self._providers[selected_format].fetch(**kwargs)
-            except requests.SiibraHttpRequestError as e:
-                if e.status_code == 429:  # too many requests
-                    sleep(0.1)
-                logger.error(f"Cannot access {self._providers[selected_format]}", exc_info=(try_count == 5))
-        if format is None and len(self.formats) > 1:
-            logger.info(
-                f"No format was specified and auto-selected format '{selected_format}' "
-                "was unsuccesful. You can specify another format from "
-                f"{set(self.formats) - set(selected_format)} to try.")
-        return None
+        return self._FETCH_CACHE[fetch_hash]
 
+    def fetch_connected_components(self, **fetch_kwargs):
+        """
+        Provide an generator over masks of connected components in the volume
+        """
+        img = self.fetch(**fetch_kwargs)
+        assert isinstance(img, Nifti1Image), NotImplementedError(
+            f"Connected components for type {type(img)} is not yet implemeneted."
+        )
+        for label, component in connected_components(np.asanyarray(img.dataobj)):
+            yield (
+                label,
+                Nifti1Image(component, img.affine)
+            )
 
-class Subvolume(Volume):
-    """
-    Wrapper class for exposing a z level of a 4D volume to be used like a 3D volume.
-    """
+    def compute_spatial_props(self, split_components: bool = True, **fetch_kwargs) -> List[ComponentSpatialProperties]:
+        """
+        Find the center of this volume in its (non-zero) voxel space and and its
+        volume.
 
-    def __init__(self, parent_volume: Volume, z: int):
-        Volume.__init__(
-            self,
-            space_spec=parent_volume._space_spec,
-            providers=[
-                SubvolumeProvider(p, z=z)
-                for p in parent_volume._providers.values()
-            ]
+        Parameters
+        ----------
+        split_components: bool, default: True
+            If True, finds the spatial properties for each connected component
+            found by skimage.measure.label.
+        """
+        assert self.provides_image, NotImplementedError("Spatial properties can currently on be calculated for images.")
+        img = self.fetch(format=fetch_kwargs.pop("format", "image"), **fetch_kwargs)
+        return ComponentSpatialProperties.compute_from_image(
+            img=img,
+            space=self.space,
+            split_components=split_components
        )
 
+    def draw_samples(self, N: int, sample_size: int = 100, e: float = 1, sigma_mm=None, invert=False, **kwargs):
+        """
+        Draw samples from the volume, by interpreting its values as an
+        unnormalized empirical probability distribtution.
+        Any keyword arguments are passed over to fetch()
+        """
+        if not self.provides_image:
+            raise NotImplementedError(
+                "Drawing samples is so far only implemented for image-type volumes, "
+                f"not {self.__class__.__name__}."
+            )
+        img = self.fetch(**kwargs)
+        array = np.asanyarray(img.dataobj)
+        samples = []
+        P = (array - array.min()) / (array.max() - array.min())
+        if invert:
+            P = 1 - P
+        P = P**e
+        while True:
+            pts = (np.random.rand(sample_size, 3) * max(P.shape))
+            inside = np.all(pts < P.shape, axis=1)
+            Y, X, Z = np.split(pts[inside, :].astype('int'), 3, axis=1)
+            T = np.random.rand(1)
+            choice = np.where(P[Y, X, Z] >= T)[0]
+            samples.extend(list(pts[inside, :][choice, :]))
+            if len(samples) > N:
+                break
+        voxels = pointcloud.PointCloud(
+            np.random.permutation(samples)[:N, :],
+            space=None
+        )
+        result = voxels.transform(img.affine, space='mni152')
+        result.sigma_mm = [sigma_mm for _ in result]
+        return result
 
-# TODO add mesh primitive. Check nibabel implementation? Use trimesh? Do we want to add yet another dependency?
-VolumeData = Union[nib.Nifti1Image, Dict]
+    def find_peaks(self, mindist=5, sigma_mm=0, **kwargs):
+        """
+        Find local peaks in the volume.
+        Additional keyword arguments are passed over to fetch()
+        """
+        if not self.provides_image:
+            raise NotImplementedError(
+                "Finding peaks is so far only implemented for image-type volumes, "
+                f"not {self.__class__.__name__}."
+            )
+        img = self.fetch(**kwargs)
+        array = np.asanyarray(img.dataobj)
+        voxels = skimage_feature.peak_local_max(array, min_distance=mindist)
+        points = pointcloud.PointCloud(voxels, space=None, labels=list(range(len(voxels)))).transform(img.affine, space=self.space)
+        points.sigma_mm = [sigma_mm for _ in points]
+        return points
 
 
-class VolumeProvider(ABC):
+class FilteredVolume(Volume):
 
-    def __init_subclass__(cls, srctype: str) -> None:
-        cls.srctype = srctype
-        return super().__init_subclass__()
+    def __init__(
+        self,
+        parent_volume: Volume,
+        label: int = None,
+        fragment: str = None,
+        threshold: float = None,
+    ):
+        """
+        A prescribed Volume to fetch specified label and fragment.
+        If threshold is defined, a mask of the values above the threshold.
 
-    @property
-    @abstractmethod
-    def boundingbox(self) -> _boundingbox.BoundingBox:
-        raise NotImplementedError
+        Parameters
+        ----------
+        parent_volume : Volume
+        label : int, default: None
+            Get the mask of value equal to label.
+        fragment : str, default None
+            If a volume is fragmented, get a specified one.
+        threshold : float, default None
+            Provide a float value to threshold the image.
+        """
+        name = parent_volume.name
+        if label:
+            name += f" - label: {label}"
+        if fragment:
+            name += f" - fragment: {fragment}"
+        if threshold:
+            name += f" - threshold: {threshold}"
+        Volume.__init__(
+            self,
+            space_spec=parent_volume._space_spec,
+            providers=list(parent_volume._providers.values()),
+            name=name
+        )
+        self.fragment = fragment
+        self.label = label
+        self.threshold = threshold
 
-    @property
-    def fragments(self) -> List[str]:
-        return []
+    def fetch(
+        self,
+        format: str = None,
+        **kwargs
+    ):
+        if "fragment" in kwargs:
+            assert kwargs.get("fragment") == self.fragment, f"This is a filtered volume that can only fetch fragment '{self.fragment}'."
+        else:
+            kwargs["fragment"] = self.fragment
+        if "label" in kwargs:
+            assert kwargs.get("label") == self.label, f"This is a filtered volume that can only fetch label '{self.label}' only."
+        else:
+            kwargs["label"] = self.label
 
-    @abstractmethod
-    def fetch(self, *args, **kwargs) -> VolumeData:
-        raise NotImplementedError
+        result = super().fetch(format=format, **kwargs)
 
-    @property
-    @abstractmethod
-    def _url(self) -> Union[str, Dict[str, str]]:
-        """
-        This is needed to provide urls to applications that can utilise such resources directly.
-        e.g. siibra-api
-        """
-        return {}
+        if self.threshold is not None:
+            assert self.label is None
+            if not isinstance(result, Nifti1Image):
+                raise NotImplementedError("Cannot threshold meshes.")
+            imgdata = np.asanyarray(result.dataobj)
+            return Nifti1Image(
+                dataobj=(imgdata > self.threshold).astype("uint8"),
+                affine=result.affine,
+                dtype="uint8"
+            )
 
+        return result
 
-class SubvolumeProvider(VolumeProvider, srctype="subvolume"):
-    """
-    This provider wraps around an existing volume provider,
-    but is preconfigured to always fetch a fixed subvolume.
-    The primary use is to provide a fixed z coordinate
-    of a 4D volume provider as a 3D volume under the
-    interface of a normal volume provider.
-    """
+    def get_boundingbox(
+        self,
+        clip: bool = True,
+        background: float = 0.0,
+        **fetch_kwargs
+    ) -> "boundingbox.BoundingBox":
+        # NOTE: since some providers enable different simpllified ways to create a
+        # bounding box without fetching the image, the correct kwargs must be
+        # forwarded since FilteredVolumes enforce their specs to be fetched.
+        return super().get_boundingbox(
+            clip=clip,
+            background=background,
+            **fetch_kwargs
+        )
 
-    _USE_CACHING = False
-    _FETCHED_VOLUMES = {}
 
-    class UseCaching:
-        def __enter__(self):
-            SubvolumeProvider._USE_CACHING = True
+class Subvolume(Volume):
+    """
+    Wrapper class for exposing a z level of a 4D volume to be used like a 3D volume.
+    """
 
-        def __exit__(self, et, ev, tb):
-            SubvolumeProvider._USE_CACHING = False
-            SubvolumeProvider._FETCHED_VOLUMES = {}
+    def __init__(self, parent_volume: Volume, z: int):
+        Volume.__init__(
+            self,
+            space_spec=parent_volume._space_spec,
+            providers=[
+                _provider.SubvolumeProvider(p, z=z)
+                for p in parent_volume._providers.values()
+            ],
+            name=parent_volume.name + f" - z: {z}"
+        )
 
-    def __init__(self, parent_provider: VolumeProvider, z: int):
-        VolumeProvider.__init__(self)
-        self.provider = parent_provider
-        self.srctype = parent_provider.srctype
-        self.z = z
-
-    @property
-    def boundingbox(self) -> _boundingbox.BoundingBox:
-        return self.provider.boundingbox
-
-    def fetch(self, **kwargs):
-        # activate caching at the caller using "with SubvolumeProvider.UseCaching():""
-        if self.__class__._USE_CACHING:
-            data_key = json.dumps(self.provider._url, sort_keys=True) \
-                + json.dumps(kwargs, sort_keys=True)
-            if data_key not in self.__class__._FETCHED_VOLUMES:
-                vol = self.provider.fetch(**kwargs)
-                self.__class__._FETCHED_VOLUMES[data_key] = vol
-            vol = self.__class__._FETCHED_VOLUMES[data_key]
+def from_file(filename: str, space: str, name: str) -> Volume:
+    """ Builds a nifti volume from a filename. """
+    from ..core.concept import get_registry
+    from .providers.nifti import NiftiProvider
+    spaceobj = get_registry("Space").get(space)
+    return Volume(
+        space_spec={"@id": spaceobj.id},
+        providers=[NiftiProvider(filename)],
+        name=filename if name is None else name,
+    )
+
+
+def from_nifti(nifti: Nifti1Image, space: str, name: str) -> Volume:
+    """Builds a nifti volume from a Nifti image."""
+    from ..core.concept import get_registry
+    from .providers.nifti import NiftiProvider
+    spaceobj = get_registry("Space").get(space)
+    return Volume(
+        space_spec={"@id": spaceobj.id},
+        providers=[NiftiProvider((np.asanyarray(nifti.dataobj), nifti.affine))],
+        name=name
+    )
+
+
+def from_array(
+    data: np.ndarray,
+    affine: np.ndarray,
+    space: Union[str, Dict[str, str]],
+    name: str
+) -> Volume:
+    """Builds a siibra volume from an array and an affine matrix."""
+    if len(name) == 0:
+        raise ValueError("Please provide a non-empty string for `name`")
+    from ..core.concept import get_registry
+    from .providers.nifti import NiftiProvider
+    spacespec = next(iter(space.values())) if isinstance(space, dict) else space
+    spaceobj = get_registry("Space").get(spacespec)
+    return Volume(
+        space_spec={"@id": spaceobj.id},
+        providers=[NiftiProvider((data, affine))],
+        name=name,
+    )
+
+
+def from_pointcloud(
+    points: pointcloud.PointCloud,
+    label: int = None,
+    target: Volume = None,
+    normalize=True,
+    **kwargs
+) -> Volume:
+    """
+    Get the kernel density estimate as a volume from the points using their
+    average uncertainty on target volume.
+
+    Parameters
+    ----------
+    points: pointcloud.PointCloud
+    label: int, default: None
+        If None, finds the KDE for all points. Otherwise, selects the points
+        labelled with this integer value.
+    target: Volume, default: None
+        If None, the template of the space points are defined on will be used.
+    normalize: bool, default: True
+
+    Raises
+    ------
+    RuntimeError
+        If no points with labels found
+    """
+    if target is None:
+        target = points.space.get_template()
+    targetimg = target.fetch(**kwargs)
+    voxels = points.transform(np.linalg.inv(targetimg.affine), space=None)
+
+    if (label is None) or (points.labels is None):
+        selection = [True for _ in points]
+    else:
+        assert label in points.labels, f"No points with the label {label} in the set: {set(points.labels)}"
+        selection = points.labels == label
+
+    voxelcount_img = np.zeros_like(targetimg.get_fdata())
+    unique_coords, counts = np.unique(
+        np.array(voxels.as_list(), dtype='int')[selection, :],
+        axis=0,
+        return_counts=True
+    )
+    voxelcount_img[tuple(unique_coords.T)] = counts
+
+    # TODO: consider how to handle pointclouds with varied sigma_mm
+    sigmas = np.array(points.sigma_mm)[selection]
+    bandwidth = np.mean(sigmas)
+    if len(np.unique(sigmas)) > 1:
+        logger.warning(f"KDE of pointcloud uses average bandwith {bandwidth} instead of the points' individual sigmas.")
+
+    filtered_arr = filters.gaussian(voxelcount_img, bandwidth)
+    if normalize:
+        filtered_arr /= filtered_arr.sum()
+
+    return from_array(
+        data=filtered_arr,
+        affine=targetimg.affine,
+        space=target.space,
+        name=f'KDE map of {points}{f"labelled {label}" if label else ""}'
+    )
+
+
+def merge(volumes: List[Volume], labels: List[int] = [], **fetch_kwargs) -> Volume:
+    """
+    Merge a list of nifti volumes in the same space into a single volume.
+
+    Note
+    ----
+    In case of voxel conflicts, the volumes will be override the previous values
+    in the given order.
+
+    Parameters
+    ----------
+    volumes : List[Volume]
+    labels : List[int], optional
+        Supply new labels to replace exisiting values per volume.
+
+    Returns
+    -------
+    Volume
+    """
+    if len(volumes) == 1:
+        logger.debug("Only one volume supplied returning as is (kwargs are ignored).")
+        return volumes[0]
+
+    assert len(volumes) > 1, "Need to supply at least two volumes to merge."
+    if labels:
+        assert len(volumes) == len(labels), "Need to supply as many labels as volumes."
+
+    space = volumes[0].space
+    assert all(v.space == space for v in volumes), "Cannot merge volumes from different spaces."
+
+    if len(labels) > 0:
+        dtype = 'int32'
+    elif FilteredVolume in {type(v) for v in volumes}:
+        dtype = 'uint8'
+    else:
+        dtype = volumes[0].fetch().dataobj.dtype
+    template_img = space.get_template().fetch(**fetch_kwargs)
+    merged_array = np.zeros(template_img.shape, dtype=dtype)
+
+    for i, vol in siibra_tqdm(
+        enumerate(volumes),
+        unit=" volume",
+        desc=f"Resampling volumes to {space.name} and merging",
+        total=len(volumes),
+        disable=len(volumes) < 3
+    ):
+        img = vol.fetch(**fetch_kwargs)
+        resampled_arr = np.asanyarray(
+            resample_img_to_img(img, template_img).dataobj
+        )
+        nonzero_voxels = resampled_arr > 0
+        if labels:
+            merged_array[nonzero_voxels] = labels[i]
         else:
-            vol = self.provider.fetch(**kwargs)
-        return vol.slicer[:, :, :, self.z]
-
-    def __getattr__(self, attr):
-        return self.provider.__getattribute__(attr)
-
-    @property
-    def _url(self) -> Union[str, Dict[str, str]]:
-        return super()._url
+            merged_array[nonzero_voxels] = resampled_arr[nonzero_voxels]
+
+    return from_array(
+        data=merged_array,
+        affine=template_img.affine,
+        space=space,
+        name=f"Resampled and merged volumes: {','.join([v.name for v in volumes])}"
+    )
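
The diff above shows that the provider-centric construction path of 0.5a2 is replaced by module-level constructors (from_file, from_nifti, from_array, from_pointcloud, merge) and that the old `boundingbox` property becomes `get_boundingbox()`. The following is a minimal, hedged sketch of how these new entry points might be combined. It is based only on the signatures visible in this diff and is not an official example from the package: the import path `siibra.volumes.volume`, the space keyword "mni152", and the availability of a loaded siibra configuration (network access for the registry and template) are assumptions.

# Hedged sketch against siibra 1.0.0a1 as shown in this diff; names marked above are assumptions.
import numpy as np
from siibra.volumes import volume as volume_module

# Build a toy binary volume from a raw array; a real use case would pass a meaningful affine.
affine = np.eye(4)
mask = volume_module.from_array(
    data=(np.random.rand(32, 32, 32) > 0.9).astype("uint8"),
    affine=affine,
    space="mni152",          # assumed to resolve in the Space registry
    name="toy mask",
)

img = mask.fetch(format="nii")           # Nifti1Image served by the in-memory NiftiProvider
bbox = mask.get_boundingbox(clip=True)   # replaces the removed `boundingbox` property
props = mask.compute_spatial_props()     # per-component centroid and volume, largest first

The same constructors feed `merge()`, which resamples a list of volumes of one space onto that space's template and combines them, optionally relabelling each volume with the supplied label values.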